Dataset schema (one row per sample):

  code                     string  (lengths 81 - 54k)
  code_codestyle           int64   (0 - 721)
  style_context            string  (lengths 91 - 41.9k)
  style_context_codestyle  int64   (0 - 699)
  label                    int64   (0 - 1)
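For orientation, a minimal sketch of how rows with this schema could be inspected with the Hugging Face `datasets` library; the dataset identifier below is a hypothetical placeholder, not the dataset's actual name.

    # Minimal inspection sketch; "path/to/this-dataset" is a placeholder assumption.
    from datasets import load_dataset

    ds = load_dataset("path/to/this-dataset", split="train")  # hypothetical identifier
    row = ds[0]
    print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
    print(row["code"][:120])  # first characters of the flattened source string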
"""Generate an image of the Mandelbrot set."""
import colorsys

from PIL import Image  # type: ignore


def get_distance(x: float, y: float, max_step: int) -> float:
    """Return the normalized escape step of the point (x, y)."""
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    """Black inside the set, white outside."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """Black inside the set, a hue based on the distance outside."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    """Render the Mandelbrot set to a PIL image."""
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
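A quick sanity check of the escape-time normalisation above, assuming the reconstruction is faithful: the origin never escapes, while c = 1 diverges after two iterations.

    # c = 0 stays bounded, so the loop runs to step 49 and the distance is 1.0 (black pixel)
    print(get_distance(0, 0, 50))  # 1.0
    # c = 1 escapes once a reaches 5, breaking at step 1
    print(get_distance(1, 0, 50))  # 1/49 ≈ 0.0204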
code_codestyle: 713
import copy import os import cva import numpy as np from matplotlib import pyplot as plt class _lowercase : def __init__( self : List[str] ) -> List[str]: """simple docstring""" a = "" a = "" a = [] a = 0 a = 256 a = 0 a = 0 a = 0 a = 0 def A ( self : Optional[Any] , __lowerCAmelCase : Any ) -> int: """simple docstring""" a = cva.imread(__lowerCAmelCase , 0 ) a = copy.deepcopy(self.img ) a , a , a = plt.hist(self.img.ravel() , 256 , [0, 256] , label="x" ) a = np.sum(__lowerCAmelCase ) for i in range(len(__lowerCAmelCase ) ): a = x[i] / self.k self.sk += prk a = (self.L - 1) * self.sk if self.rem != 0: a = int(last % last ) a = int(last + 1 if self.rem >= 0.5 else last ) self.last_list.append(__lowerCAmelCase ) a = int(np.ma.count(self.img ) / self.img[1].size ) a = self.img[1].size for i in range(self.number_of_cols ): for j in range(self.number_of_rows ): a = self.img[j][i] if num != self.last_list[num]: a = self.last_list[num] cva.imwrite("output_data/output.jpg" , self.img ) def A ( self : Any ) -> int: """simple docstring""" plt.hist(self.img.ravel() , 256 , [0, 256] ) def A ( self : Any ) -> int: """simple docstring""" cva.imshow("Output-Image" , self.img ) cva.imshow("Input-Image" , self.original_image ) cva.waitKey(5000 ) cva.destroyAllWindows() if __name__ == "__main__": A_ : List[Any] = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''') A_ : int = ConstantStretch() stretcher.stretch(file_path) stretcher.plot_histogram() stretcher.show_image()
style_context_codestyle: 32
label: 0
from ...configuration_utils import PretrainedConfig from ...utils import logging A_ : Union[str, Any] = logging.get_logger(__name__) A_ : Optional[int] = { '''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''', } class _lowercase ( UpperCAmelCase__ ): _UpperCAmelCase = '''switch_transformers''' _UpperCAmelCase = ['''past_key_values'''] _UpperCAmelCase = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self : str , __lowerCAmelCase : int=3_2128 , __lowerCAmelCase : Optional[Any]=768 , __lowerCAmelCase : Optional[Any]=64 , __lowerCAmelCase : Optional[int]=2048 , __lowerCAmelCase : List[str]=64 , __lowerCAmelCase : int=12 , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : Union[str, Any]=12 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : str=12 , __lowerCAmelCase : Optional[int]=8 , __lowerCAmelCase : List[str]=False , __lowerCAmelCase : Any=0.0_1 , __lowerCAmelCase : Union[str, Any]="float32" , __lowerCAmelCase : List[str]=False , __lowerCAmelCase : int=32 , __lowerCAmelCase : Tuple=128 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : Union[str, Any]=1E-6 , __lowerCAmelCase : Union[str, Any]=0.0_0_1 , __lowerCAmelCase : Any=0.0_0_1 , __lowerCAmelCase : Dict=1.0 , __lowerCAmelCase : Optional[int]="relu" , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : List[str]=False , __lowerCAmelCase : int=True , __lowerCAmelCase : Tuple=0 , __lowerCAmelCase : Optional[Any]=1 , **__lowerCAmelCase : Optional[int] , ) -> int: """simple docstring""" a = vocab_size a = d_model a = d_kv a = d_ff a = num_sparse_encoder_layers a = num_layers a = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry a = num_sparse_decoder_layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_encoder_layers > 0: a = self.num_layers // self.num_sparse_encoder_layers else: a = self.num_layers # HACK: this will create 0 sparse layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_decoder_layers > 0: a = self.num_decoder_layers // self.num_sparse_decoder_layers else: a = self.num_decoder_layers # HACK: this will create 0 sparse layers a = num_heads a = num_experts a = expert_capacity a = router_bias a = router_jitter_noise if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(f"""`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}""" ) a = router_dtype a = router_ignore_padding_tokens a = relative_attention_num_buckets a = relative_attention_max_distance a = dropout_rate a = layer_norm_epsilon a = initializer_factor a = feed_forward_proj a = use_cache a = add_router_probs a = router_z_loss_coef a = router_aux_loss_coef a = self.feed_forward_proj.split("-" ) a = act_info[-1] a = act_info[0] == "gated" if len(__UpperCamelCase ) > 1 and act_info[0] != "gated" or len(__UpperCamelCase ) > 2: raise ValueError( f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.""" "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. " "'gated-gelu' or 'relu'" ) # for backwards compatibility if feed_forward_proj == "gated-gelu": a = "gelu_new" super().__init__( pad_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , is_encoder_decoder=__UpperCamelCase , **__UpperCamelCase , )
code_codestyle: 714
from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import ScoreSdeVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class _lowercase ( UpperCAmelCase__ ): _UpperCAmelCase = 42 _UpperCAmelCase = 42 def __init__( self : Optional[Any] , __lowerCAmelCase : UNetaDModel , __lowerCAmelCase : ScoreSdeVeScheduler ) -> str: """simple docstring""" super().__init__() self.register_modules(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase ) @torch.no_grad() def __call__( self : int , __lowerCAmelCase : int = 1 , __lowerCAmelCase : int = 2000 , __lowerCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowerCAmelCase : Optional[str] = "pil" , __lowerCAmelCase : bool = True , **__lowerCAmelCase : Any , ) -> Union[ImagePipelineOutput, Tuple]: """simple docstring""" a = self.unet.config.sample_size a = (batch_size, 3, img_size, img_size) a = self.unet a = randn_tensor(__lowerCAmelCase , generator=__lowerCAmelCase ) * self.scheduler.init_noise_sigma a = sample.to(self.device ) self.scheduler.set_timesteps(__lowerCAmelCase ) self.scheduler.set_sigmas(__lowerCAmelCase ) for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): a = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device ) # correction step for _ in range(self.scheduler.config.correct_steps ): a = self.unet(__lowerCAmelCase , __lowerCAmelCase ).sample a = self.scheduler.step_correct(__lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase ).prev_sample # prediction step a = model(__lowerCAmelCase , __lowerCAmelCase ).sample a = self.scheduler.step_pred(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase ) a , a = output.prev_sample, output.prev_sample_mean a = sample_mean.clamp(0 , 1 ) a = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": a = self.numpy_to_pil(__lowerCAmelCase ) if not return_dict: return (sample,) return ImagePipelineOutput(images=__lowerCAmelCase )
style_context_codestyle: 32
label: 0
import string # frequency taken from https://en.wikipedia.org/wiki/Letter_frequency A_ : Union[str, Any] = { '''E''': 12.70, '''T''': 9.06, '''A''': 8.17, '''O''': 7.51, '''I''': 6.97, '''N''': 6.75, '''S''': 6.33, '''H''': 6.09, '''R''': 5.99, '''D''': 4.25, '''L''': 4.03, '''C''': 2.78, '''U''': 2.76, '''M''': 2.41, '''W''': 2.36, '''F''': 2.23, '''G''': 2.02, '''Y''': 1.97, '''P''': 1.93, '''B''': 1.29, '''V''': 0.98, '''K''': 0.77, '''J''': 0.15, '''X''': 0.15, '''Q''': 0.10, '''Z''': 0.07, } A_ : Optional[Any] = '''ETAOINSHRDLCUMWFGYPBVKJXQZ''' A_ : int = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ''' def UpperCAmelCase__ ( UpperCAmelCase__ :str ): '''simple docstring''' a = {letter: 0 for letter in string.ascii_uppercase} for letter in message.upper(): if letter in LETTERS: letter_count[letter] += 1 return letter_count def UpperCAmelCase__ ( UpperCAmelCase__ :tuple ): '''simple docstring''' return x[0] def UpperCAmelCase__ ( UpperCAmelCase__ :str ): '''simple docstring''' a = get_letter_count(_UpperCamelCase ) a = { freq: [] for letter, freq in letter_to_freq.items() } for letter in LETTERS: freq_to_letter[letter_to_freq[letter]].append(_UpperCamelCase ) a = {} for freq in freq_to_letter: freq_to_letter[freq].sort(key=ETAOIN.find , reverse=_UpperCamelCase ) a = "".join(freq_to_letter[freq] ) a = list(freq_to_letter_str.items() ) freq_pairs.sort(key=_UpperCamelCase , reverse=_UpperCamelCase ) a = [freq_pair[1] for freq_pair in freq_pairs] return "".join(_UpperCamelCase ) def UpperCAmelCase__ ( UpperCAmelCase__ :str ): '''simple docstring''' a = get_frequency_order(_UpperCamelCase ) a = 0 for common_letter in ETAOIN[:6]: if common_letter in freq_order[:6]: match_score += 1 for uncommon_letter in ETAOIN[-6:]: if uncommon_letter in freq_order[-6:]: match_score += 1 return match_score if __name__ == "__main__": import doctest doctest.testmod()
code_codestyle: 715
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def get_week_day(year: int, month: int, day: int) -> str:
    """Return the week-day name of a given date using the Doomsday algorithm."""
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"

    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # common years are those not divisible by 4, plus century years not divisible by 400
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
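A worked check of the anchor arithmetic above: for 1 January 2000 the century anchor is (5 * (20 % 4) + 2) % 7 = 2, the doomsday is (0 + 0 + 0 + 2) % 7 = 2, and since 2000 is a leap year the January anchor is DOOMSDAY_LEAP[0] = 4, giving (2 + 1 - 4) % 7 = 6, a Saturday.

    print(get_week_day(2000, 1, 1))    # Saturday
    print(get_week_day(2020, 10, 24))  # Saturday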
style_context_codestyle: 32
label: 0
import secrets from random import shuffle from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation def UpperCAmelCase__ ( UpperCAmelCase__ :List[str] = 8 ): '''simple docstring''' a = ascii_letters + digits + punctuation return "".join(secrets.choice(UpperCAmelCase__ ) for _ in range(UpperCAmelCase__ ) ) def UpperCAmelCase__ ( UpperCAmelCase__ :int , UpperCAmelCase__ :Optional[int] ): '''simple docstring''' i -= len(UpperCAmelCase__ ) a = i // 3 a = i % 3 # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) + # random_number(digits, i / 3) + random_characters(punctuation, i / 3) a = ( chars_incl + random(UpperCAmelCase__ , quotient + remainder ) + random(UpperCAmelCase__ , UpperCAmelCase__ ) + random(UpperCAmelCase__ , UpperCAmelCase__ ) ) a = list(UpperCAmelCase__ ) shuffle(UpperCAmelCase__ ) return "".join(UpperCAmelCase__ ) # random is a generalised function for letters, characters and numbers def UpperCAmelCase__ ( UpperCAmelCase__ :Tuple , UpperCAmelCase__ :Dict ): '''simple docstring''' return "".join(secrets.choice(UpperCAmelCase__ ) for _ in range(UpperCAmelCase__ ) ) def UpperCAmelCase__ ( UpperCAmelCase__ :List[Any] , UpperCAmelCase__ :Dict ): '''simple docstring''' pass # Put your code here... def UpperCAmelCase__ ( UpperCAmelCase__ :Tuple , UpperCAmelCase__ :Optional[Any] ): '''simple docstring''' pass # Put your code here... def UpperCAmelCase__ ( UpperCAmelCase__ :Dict , UpperCAmelCase__ :List[Any] ): '''simple docstring''' pass # Put your code here... def UpperCAmelCase__ ( UpperCAmelCase__ :Any , UpperCAmelCase__ :Dict = 8 ): '''simple docstring''' if len(UpperCAmelCase__ ) < min_length: # Your Password must be at least 8 characters long return False a = any(char in ascii_uppercase for char in password ) a = any(char in ascii_lowercase for char in password ) a = any(char in digits for char in password ) a = any(char in punctuation for char in password ) return upper and lower and num and spec_char # Passwords should contain UPPERCASE, lowerase # numbers, and special characters def UpperCAmelCase__ ( ): '''simple docstring''' a = int(input("Please indicate the max length of your password: " ).strip() ) a = input( "Please indicate the characters that must be in your password: " ).strip() print("Password generated:" , password_generator(UpperCAmelCase__ ) ) print( "Alternative Password generated:" , alternative_password_generator(UpperCAmelCase__ , UpperCAmelCase__ ) , ) print("[If you are thinking of using this passsword, You better save it.]" ) if __name__ == "__main__": main()
code_codestyle: 716
import logging import os import sys from dataclasses import dataclass, field from importlib import import_module from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch import nn from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process A_ : int = logging.getLogger(__name__) @dataclass class _lowercase : _UpperCAmelCase = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) _UpperCAmelCase = field( default=UpperCAmelCase__, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) _UpperCAmelCase = field( default='''NER''', metadata={'''help''': '''Task type to fine tune in training (e.g. NER, POS, etc)'''} ) _UpperCAmelCase = field( default=UpperCAmelCase__, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) _UpperCAmelCase = field(default=UpperCAmelCase__, metadata={'''help''': '''Set this flag to use fast tokenization.'''} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. _UpperCAmelCase = field( default=UpperCAmelCase__, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, ) @dataclass class _lowercase : _UpperCAmelCase = field( metadata={'''help''': '''The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'''} ) _UpperCAmelCase = field( default=UpperCAmelCase__, metadata={'''help''': '''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'''}, ) _UpperCAmelCase = field( default=128, metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) }, ) _UpperCAmelCase = field( default=UpperCAmelCase__, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) def UpperCAmelCase__ ( ): '''simple docstring''' a = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. a , a , a = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: a , a , a = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use""" " --overwrite_output_dir to overcome." ) a = import_module("tasks" ) try: a = getattr(UpperCAmelCase__ , model_args.task_type ) a = token_classification_task_clazz() except AttributeError: raise ValueError( F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. 
""" F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("Training/evaluation parameters %s" , UpperCAmelCase__ ) # Set seed set_seed(training_args.seed ) # Prepare CONLL-2003 task a = token_classification_task.get_labels(data_args.labels ) a = dict(enumerate(UpperCAmelCase__ ) ) a = len(UpperCAmelCase__ ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. a = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCAmelCase__ , idalabel=UpperCAmelCase__ , labelaid={label: i for i, label in enumerate(UpperCAmelCase__ )} , cache_dir=model_args.cache_dir , ) a = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , ) a = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=UpperCAmelCase__ , cache_dir=model_args.cache_dir , ) # Get datasets a = ( TokenClassificationDataset( token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) a = ( TokenClassificationDataset( token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def align_predictions(UpperCAmelCase__ :np.ndarray , UpperCAmelCase__ :np.ndarray ) -> Tuple[List[int], List[int]]: a = np.argmax(UpperCAmelCase__ , axis=2 ) a , a = preds.shape a = [[] for _ in range(UpperCAmelCase__ )] a = [[] for _ in range(UpperCAmelCase__ )] for i in range(UpperCAmelCase__ ): for j in range(UpperCAmelCase__ ): if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index: out_label_list[i].append(label_map[label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) return preds_list, out_label_list def compute_metrics(UpperCAmelCase__ :EvalPrediction ) -> Dict: a , a = align_predictions(p.predictions , p.label_ids ) return { "accuracy_score": accuracy_score(UpperCAmelCase__ , UpperCAmelCase__ ), "precision": precision_score(UpperCAmelCase__ , UpperCAmelCase__ ), "recall": recall_score(UpperCAmelCase__ , UpperCAmelCase__ ), "f1": fa_score(UpperCAmelCase__ , UpperCAmelCase__ ), } # Data collator a 
= DataCollatorWithPadding(UpperCAmelCase__ , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer a = Trainer( model=UpperCAmelCase__ , args=UpperCAmelCase__ , train_dataset=UpperCAmelCase__ , eval_dataset=UpperCAmelCase__ , compute_metrics=UpperCAmelCase__ , data_collator=UpperCAmelCase__ , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_process_zero(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation a = {} if training_args.do_eval: logger.info("*** Evaluate ***" ) a = trainer.evaluate() a = os.path.join(training_args.output_dir , "eval_results.txt" ) if trainer.is_world_process_zero(): with open(UpperCAmelCase__ , "w" ) as writer: logger.info("***** Eval results *****" ) for key, value in result.items(): logger.info(" %s = %s" , UpperCAmelCase__ , UpperCAmelCase__ ) writer.write("%s = %s\n" % (key, value) ) results.update(UpperCAmelCase__ ) # Predict if training_args.do_predict: a = TokenClassificationDataset( token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , ) a , a , a = trainer.predict(UpperCAmelCase__ ) a , a = align_predictions(UpperCAmelCase__ , UpperCAmelCase__ ) a = os.path.join(training_args.output_dir , "test_results.txt" ) if trainer.is_world_process_zero(): with open(UpperCAmelCase__ , "w" ) as writer: for key, value in metrics.items(): logger.info(" %s = %s" , UpperCAmelCase__ , UpperCAmelCase__ ) writer.write("%s = %s\n" % (key, value) ) # Save predictions a = os.path.join(training_args.output_dir , "test_predictions.txt" ) if trainer.is_world_process_zero(): with open(UpperCAmelCase__ , "w" ) as writer: with open(os.path.join(data_args.data_dir , "test.txt" ) , "r" ) as f: token_classification_task.write_predictions_to_file(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) return results def UpperCAmelCase__ ( UpperCAmelCase__ :Tuple ): '''simple docstring''' main() if __name__ == "__main__": main()
style_context_codestyle: 32
label: 0
import importlib.util import json import os import warnings from dataclasses import dataclass, field import torch from ..training_args import TrainingArguments from ..utils import cached_property, is_sagemaker_dp_enabled, logging A_ : Tuple = logging.get_logger(__name__) def UpperCAmelCase__ ( ): a = os.getenv("SM_HP_MP_PARAMETERS" , "{}" ) try: # Parse it and check the field "partitions" is included, it is required for model parallel. a = json.loads(UpperCAmelCase__ ) if "partitions" not in smp_options: return False except json.JSONDecodeError: return False # Get the sagemaker specific framework parameters from mpi_options variable. a = os.getenv("SM_FRAMEWORK_PARAMS" , "{}" ) try: # Parse it and check the field "sagemaker_distributed_dataparallel_enabled". a = json.loads(UpperCAmelCase__ ) if not mpi_options.get("sagemaker_mpi_enabled" , UpperCAmelCase__ ): return False except json.JSONDecodeError: return False # Lastly, check if the `smdistributed` module is present. return importlib.util.find_spec("smdistributed" ) is not None if is_sagemaker_model_parallel_available(): import smdistributed.modelparallel.torch as smp smp.init() @dataclass class _lowercase ( __lowerCAmelCase ): _UpperCAmelCase = field( default='''''', metadata={'''help''': '''Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'''}, ) def A ( self : List[Any] ) -> str: """simple docstring""" super().__post_init__() warnings.warn( "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use " "`TrainingArguments` instead." , lowerCamelCase__ , ) @cached_property def A ( self : Dict ) -> "torch.device": """simple docstring""" logger.info("PyTorch: setting up devices" ) if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1: logger.warning( "torch.distributed process group is initialized, but local_rank == -1. " "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch" ) if self.no_cuda: a = torch.device("cpu" ) a = 0 elif is_sagemaker_model_parallel_available(): a = smp.local_rank() a = torch.device("cuda" , lowerCamelCase__ ) a = 1 elif is_sagemaker_dp_enabled(): import smdistributed.dataparallel.torch.torch_smddp # noqa: F401 torch.distributed.init_process_group(backend="smddp" , timeout=self.ddp_timeout_delta ) a = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK" ) ) a = torch.device("cuda" , self.local_rank ) a = 1 elif self.local_rank == -1: # if n_gpu is > 1 we'll use nn.DataParallel. # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0` # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will # trigger an error that a device index is missing. Index 0 takes into account the # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0` # will use the first GPU in that env, i.e. GPU#1 a = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" ) # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at # the default value. a = torch.cuda.device_count() else: # Here, we'll use torch.distributed. 
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs if not torch.distributed.is_initialized(): torch.distributed.init_process_group(backend="nccl" , timeout=self.ddp_timeout_delta ) a = torch.device("cuda" , self.local_rank ) a = 1 if device.type == "cuda": torch.cuda.set_device(lowerCamelCase__ ) return device @property def A ( self : List[Any] ) -> int: """simple docstring""" if is_sagemaker_model_parallel_available(): return smp.dp_size() return super().world_size @property def A ( self : List[Any] ) -> Any: """simple docstring""" return not is_sagemaker_model_parallel_available() @property def A ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" return False
code_codestyle: 717
from ...configuration_utils import PretrainedConfig from ...utils import logging A_ : str = logging.get_logger(__name__) A_ : List[Any] = { '''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''', '''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''', '''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''', '''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''', '''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''', '''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''', '''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''', '''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''', '''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''', '''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''', } class _lowercase ( UpperCAmelCase__ ): _UpperCAmelCase = '''rwkv''' _UpperCAmelCase = {'''max_position_embeddings''': '''context_length'''} def __init__( self : List[str] , __lowerCAmelCase : Union[str, Any]=5_0277 , __lowerCAmelCase : str=1024 , __lowerCAmelCase : Union[str, Any]=4096 , __lowerCAmelCase : Optional[int]=32 , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : List[Any]=1E-5 , __lowerCAmelCase : Union[str, Any]=0 , __lowerCAmelCase : Optional[Any]=0 , __lowerCAmelCase : Dict=6 , __lowerCAmelCase : int=False , __lowerCAmelCase : Tuple=True , **__lowerCAmelCase : List[str] , ) -> List[Any]: """simple docstring""" a = vocab_size a = context_length a = hidden_size a = num_hidden_layers a = attention_hidden_size if attention_hidden_size is not None else hidden_size a = intermediate_size if intermediate_size is not None else 4 * hidden_size a = layer_norm_epsilon a = rescale_every a = use_cache a = bos_token_id a = eos_token_id super().__init__( tie_word_embeddings=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
style_context_codestyle: 32
label: 0
from __future__ import annotations

from random import choice


def random_pivot(lst: list[int]) -> int:
    """Choose a random pivot element from the list."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the kth smallest element of lst (1-indexed) via quickselect."""
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
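A short usage sketch of kth_number; note that the strict < and > partitions silently drop duplicates of the pivot, so the function assumes distinct elements.

    print(kth_number([2, 1, 3, 4, 5], 1))  # 1 (smallest)
    print(kth_number([2, 1, 3, 4, 5], 3))  # 3 (median)
    print(kth_number([2, 1, 3, 4, 5], 5))  # 5 (largest)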
code_codestyle: 718
from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging A_ : List[str] = logging.get_logger(__name__) class _lowercase ( UpperCAmelCase__ ): _UpperCAmelCase = ['''audio_values''', '''audio_mask'''] def __init__( self : List[Any] , __lowerCAmelCase : Dict=2048 , __lowerCAmelCase : List[Any]=1 , __lowerCAmelCase : Dict=[16, 16] , __lowerCAmelCase : str=128 , __lowerCAmelCase : Optional[int]=4_4100 , __lowerCAmelCase : int=86 , __lowerCAmelCase : Optional[Any]=2048 , __lowerCAmelCase : str=0.0 , **__lowerCAmelCase : Optional[int] , ) -> Union[str, Any]: """simple docstring""" super().__init__( feature_size=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , padding_value=__lowerCAmelCase , **__lowerCAmelCase , ) a = spectrogram_length a = num_channels a = patch_size a = feature_size // self.patch_size[1] a = n_fft a = sampling_rate // hop_length_to_sampling_rate a = sampling_rate a = padding_value a = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__lowerCAmelCase , min_frequency=0.0 , max_frequency=2_2_0_5_0.0 , sampling_rate=__lowerCAmelCase , norm="slaney" , mel_scale="slaney" , ).T def A ( self : List[str] , __lowerCAmelCase : np.array ) -> np.ndarray: """simple docstring""" a = spectrogram( __lowerCAmelCase , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="dB" , db_range=8_0.0 , ) a = log_spec[:, :-1] a = log_spec - 2_0.0 a = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self : Union[str, Any] , __lowerCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , __lowerCAmelCase : Optional[bool] = True , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , **__lowerCAmelCase : Optional[int] , ) -> BatchFeature: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( "This feature extractor is set to support sampling rate" f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled""" f""" with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." 
) a = isinstance(__lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) a = is_batched_numpy or ( isinstance(__lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: a = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray ): a = np.asarray(__lowerCAmelCase , dtype=np.floataa ) elif isinstance(__lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): a = raw_speech.astype(np.floataa ) # always return batch if not is_batched: a = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis a = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , __lowerCAmelCase ): a = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask a = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: a = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] a = np.array(__lowerCAmelCase ).astype(np.floataa ) # convert into correct format for padding a = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch a = np.ones([len(__lowerCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) a = padded_audio_features * self.padding_value for i in range(len(__lowerCAmelCase ) ): a = audio_features[i] a = feature # return as BatchFeature if return_attention_mask: a = {"audio_values": padded_audio_features, "audio_mask": audio_mask} else: a = {"audio_values": padded_audio_features} a = BatchFeature(data=__lowerCAmelCase , tensor_type=__lowerCAmelCase ) return encoded_inputs
style_context_codestyle: 32
label: 0
import pytest import datasets # Import fixture modules as plugins A_ : Any = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""] def UpperCAmelCase__ ( UpperCAmelCase__ :Optional[int] , UpperCAmelCase__ :str ): '''simple docstring''' for item in items: if any(marker in item.keywords for marker in ["integration", "unit"] ): continue item.add_marker(pytest.mark.unit ) def UpperCAmelCase__ ( UpperCAmelCase__ :Dict ): '''simple docstring''' config.addinivalue_line("markers" , "torchaudio_latest: mark test to run with torchaudio>=0.12" ) @pytest.fixture(autouse=UpperCamelCase__ ) def UpperCAmelCase__ ( UpperCAmelCase__ :str , UpperCAmelCase__ :Union[str, Any] ): '''simple docstring''' a = tmp_path_factory.getbasetemp() / "cache" a = test_hf_cache_home / "datasets" a = test_hf_cache_home / "metrics" a = test_hf_cache_home / "modules" monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE" , str(UpperCamelCase__ ) ) monkeypatch.setattr("datasets.config.HF_METRICS_CACHE" , str(UpperCamelCase__ ) ) monkeypatch.setattr("datasets.config.HF_MODULES_CACHE" , str(UpperCamelCase__ ) ) a = test_hf_datasets_cache / "downloads" monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH" , str(UpperCamelCase__ ) ) a = test_hf_datasets_cache / "downloads" / "extracted" monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(UpperCamelCase__ ) ) @pytest.fixture(autouse=UpperCamelCase__ , scope="session" ) def UpperCAmelCase__ ( ): '''simple docstring''' datasets.disable_progress_bar() @pytest.fixture(autouse=UpperCamelCase__ ) def UpperCAmelCase__ ( UpperCAmelCase__ :Union[str, Any] ): '''simple docstring''' monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS" , UpperCamelCase__ ) @pytest.fixture def UpperCAmelCase__ ( UpperCAmelCase__ :Optional[Any] ): '''simple docstring''' monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING" , UpperCamelCase__ )
code_codestyle: 719
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class _lowercase : def __init__( self : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : int=10 , __lowerCAmelCase : Any=3 , __lowerCAmelCase : Optional[int]=32 * 4 , __lowerCAmelCase : Dict=32 * 6 , __lowerCAmelCase : str=4 , __lowerCAmelCase : Dict=32 , ) -> Any: """simple docstring""" a = parent a = batch_size a = is_training a = use_auxiliary_loss a = num_queries a = num_channels a = min_size a = max_size a = num_labels a = mask_feature_size def A ( self : Union[str, Any] ) -> Dict: """simple docstring""" a = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __lowerCAmelCase ) a = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowerCAmelCase ) a = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowerCAmelCase ) > 0.5 ).float() a = (torch.rand((self.batch_size, self.num_labels) , device=__lowerCAmelCase ) > 0.5).long() a = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def A ( self : str ) -> Any: """simple docstring""" return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def A ( self : Union[str, Any] ) -> Any: """simple docstring""" a , a , a , a , a = self.prepare_config_and_inputs() a = {"pixel_values": pixel_values, "pixel_mask": pixel_mask} return config, inputs_dict def A ( self : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Dict ) -> str: """simple docstring""" a = output.encoder_hidden_states a = output.pixel_decoder_hidden_states a = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowerCAmelCase ) , config.decoder_config.decoder_layers ) def A ( self : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str]=False ) -> Tuple: """simple docstring""" with torch.no_grad(): a = MaskFormerModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase ) a = model(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) # the correct shape of 
output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(__lowerCAmelCase , __lowerCAmelCase ) def A ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] ) -> Optional[int]: """simple docstring""" a = MaskFormerForInstanceSegmentation(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() def comm_check_on_output(__lowerCAmelCase : Tuple ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): a = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase ) a = model(__lowerCAmelCase ) comm_check_on_output(__lowerCAmelCase ) a = model( pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ) comm_check_on_output(__lowerCAmelCase ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ): _UpperCAmelCase = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () _UpperCAmelCase = ( {'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False def A ( self : List[str] ) -> List[Any]: """simple docstring""" a = MaskFormerModelTester(self ) a = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase ) def A ( self : Any ) -> List[str]: """simple docstring""" self.config_tester.run_common_tests() def A ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" a , a = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) def A ( self : int ) -> int: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowerCAmelCase ) @unittest.skip(reason="MaskFormer does not use inputs_embeds" ) def A ( self : List[Any] ) -> Optional[Any]: """simple docstring""" pass @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" ) def A ( self : str ) -> Union[str, Any]: """simple docstring""" pass @unittest.skip(reason="MaskFormer is not 
a generative model" ) def A ( self : Tuple ) -> Optional[Any]: """simple docstring""" pass @unittest.skip(reason="MaskFormer does not use token embeddings" ) def A ( self : Tuple ) -> Optional[Any]: """simple docstring""" pass @require_torch_multi_gpu @unittest.skip( reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def A ( self : Optional[int] ) -> List[str]: """simple docstring""" pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def A ( self : List[str] ) -> Any: """simple docstring""" pass def A ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" a , a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = model_class(__lowerCAmelCase ) a = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a = [*signature.parameters.keys()] a = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCAmelCase ) @slow def A ( self : Tuple ) -> List[Any]: """simple docstring""" for model_name in ["facebook/maskformer-swin-small-coco"]: a = MaskFormerModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) def A ( self : str ) -> Dict: """simple docstring""" a = (self.model_tester.min_size,) * 2 a = { "pixel_values": torch.randn((2, 3, *size) , device=__lowerCAmelCase ), "mask_labels": torch.randn((2, 10, *size) , device=__lowerCAmelCase ), "class_labels": torch.zeros(2 , 10 , device=__lowerCAmelCase ).long(), } a = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowerCAmelCase ) a = model(**__lowerCAmelCase ) self.assertTrue(outputs.loss is not None ) def A ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" a , a = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) def A ( self : List[str] ) -> Any: """simple docstring""" a , a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = model_class(__lowerCAmelCase ).to(__lowerCAmelCase ) a = model(**__lowerCAmelCase , output_attentions=__lowerCAmelCase ) self.assertTrue(outputs.attentions is not None ) def A ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss a = self.all_model_classes[1] a , a , a , a , a = self.model_tester.prepare_config_and_inputs() a = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() a = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ).loss loss.backward() def A ( self : List[str] ) -> Union[str, Any]: """simple docstring""" a = self.all_model_classes[1] a , a , a , a , a = self.model_tester.prepare_config_and_inputs() a = True a = True a = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() a = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ) a = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() a = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't a = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() a = outputs.attentions[0] attentions.retain_grad() 
outputs.loss.backward(retain_graph=__lowerCAmelCase ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) A_ : int = 1E-4 def UpperCAmelCase__ ( ): '''simple docstring''' a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_vision @slow class _lowercase ( unittest.TestCase ): @cached_property def A ( self : int ) -> Optional[int]: """simple docstring""" return ( MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" ) if is_vision_available() else None ) def A ( self : List[Any] ) -> Optional[Any]: """simple docstring""" a = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(__lowerCAmelCase ) a = self.default_image_processor a = prepare_img() a = image_processor(__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase ) a = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) ) with torch.no_grad(): a = model(**__lowerCAmelCase ) a = torch.tensor( [[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) a = torch.tensor( [[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) a = torch.tensor( [[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def A ( self : str ) -> Union[str, Any]: """simple docstring""" a = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" ) .to(__lowerCAmelCase ) .eval() ) a = self.default_image_processor a = prepare_img() a = image_processor(__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase ) a = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) ) with torch.no_grad(): a = model(**__lowerCAmelCase ) # masks_queries_logits a = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) a = [ [-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3], [-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5], [-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2], ] a = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) # class_queries_logits a = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) a = torch.tensor( [ [1.65_12E00, -5.25_72E00, -3.35_19E00], [3.61_69E-02, -5.90_25E00, -2.93_13E00], [1.07_66E-04, -7.76_30E00, 
-5.12_63E00], ] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def A ( self : List[Any] ) -> Any: """simple docstring""" a = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" ) .to(__lowerCAmelCase ) .eval() ) a = self.default_image_processor a = prepare_img() a = image_processor(__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase ) a = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) ) with torch.no_grad(): a = model(**__lowerCAmelCase ) # masks_queries_logits a = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) a = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]] a = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) # class_queries_logits a = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) a = torch.tensor( [[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def A ( self : int ) -> Any: """simple docstring""" a = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" ) .to(__lowerCAmelCase ) .eval() ) a = self.default_image_processor a = image_processor( [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="pt" , ) a = inputs["pixel_values"].to(__lowerCAmelCase ) a = [el.to(__lowerCAmelCase ) for el in inputs["mask_labels"]] a = [el.to(__lowerCAmelCase ) for el in inputs["class_labels"]] with torch.no_grad(): a = model(**__lowerCAmelCase ) self.assertTrue(outputs.loss is not None )
style_context_codestyle: 32
label: 0
from abc import ABC, abstractmethod from typing import Optional, Union from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit from ..utils.typing import NestedDataStructureLike, PathLike class _lowercase ( UpperCAmelCase__ ): def __init__( self : Optional[Any] , __lowerCAmelCase : Optional[NestedDataStructureLike[PathLike]] = None , __lowerCAmelCase : Optional[NamedSplit] = None , __lowerCAmelCase : Optional[Features] = None , __lowerCAmelCase : str = None , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional[int] = None , **__lowerCAmelCase : Optional[Any] , ) -> Tuple: """simple docstring""" a = path_or_paths a = split if split or isinstance(__lowerCAmelCase , __lowerCAmelCase ) else "train" a = features a = cache_dir a = keep_in_memory a = streaming a = num_proc a = kwargs @abstractmethod def A ( self : List[Any] ) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]: """simple docstring""" pass class _lowercase ( UpperCAmelCase__ ): def __init__( self : Any , __lowerCAmelCase : Optional[Features] = None , __lowerCAmelCase : str = None , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional[int] = None , **__lowerCAmelCase : int , ) -> Dict: """simple docstring""" a = features a = cache_dir a = keep_in_memory a = streaming a = num_proc a = kwargs @abstractmethod def A ( self : Optional[int] ) -> Union[Dataset, IterableDataset]: """simple docstring""" pass
code_codestyle: 720
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class _lowercase ( unittest.TestCase ): def A ( self : Union[str, Any] ) -> int: """simple docstring""" a = [[1, 2, 4], [1, 2, 3, 4]] a = DisjunctiveConstraint(__lowerCAmelCase ) self.assertTrue(isinstance(dc.token_ids , __lowerCAmelCase ) ) with self.assertRaises(__lowerCAmelCase ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(__lowerCAmelCase ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def A ( self : Tuple ) -> Dict: """simple docstring""" a = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(__lowerCAmelCase ): DisjunctiveConstraint(__lowerCAmelCase ) # fails here def A ( self : int ) -> Any: """simple docstring""" a = [[1, 2, 3], [1, 2, 4]] a = DisjunctiveConstraint(__lowerCAmelCase ) a , a , a = dc.update(1 ) a = stepped is True and completed is False and reset is False self.assertTrue(__lowerCAmelCase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) a , a , a = dc.update(2 ) a = stepped is True and completed is False and reset is False self.assertTrue(__lowerCAmelCase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) a , a , a = dc.update(3 ) a = stepped is True and completed is True and reset is False self.assertTrue(__lowerCAmelCase ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def A ( self : List[Any] ) -> List[Any]: """simple docstring""" a = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] a = DisjunctiveConstraint(__lowerCAmelCase ) a , a , a = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) a , a , a = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) a , a , a = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) a , a , a = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() a , a , a = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) a , a , a = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) a , a , a = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
32
0
import numpy as np
from PIL import Image


def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """
    Perform max pooling on a square matrix with the given pooling size and stride.
    """
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """
    Perform average pooling on a square matrix with the given pooling size and stride.
    """
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="avgpooling", verbose=True)

    # Loading the image
    image = Image.open("path_to_image")

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
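A small deterministic check of the two functions on a 4x4 matrix, with the expected outputs worked out by hand:

import numpy as np

arr = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
# 2x2 windows, stride 2 -> output shape (4 - 2) // 2 + 1 = 2
print(maxpooling(arr, size=2, stride=2))  # [[ 6.  8.] [14. 16.]]
print(avgpooling(arr, size=2, stride=2))  # [[ 3.  5.] [11. 13.]]  (averages truncated to int)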
721
from __future__ import annotations


def is_9_pandigital(n: int) -> bool:
    """
    Check whether `n` uses each of the digits 1 through 9 exactly once.
    """
    base_str = str(n)
    return len(base_str) == 9 and set(base_str) == set("123456789")


def solution() -> int | None:
    """
    Return the largest 1-9 pandigital number expressible as a concatenated product.
    """
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate

    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
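A worked example of the pandigital test: 192384576 is the concatenated product of 192 and (1, 2, 3):

# 192 * 1 = 192, 192 * 2 = 384, 192 * 3 = 576 -> "192384576"
assert is_9_pandigital(192384576)      # uses each of 1-9 exactly once
assert not is_9_pandigital(123456780)  # contains 0 and misses 9
# the first search loop checks numbers of the form base * 100002, i.e. the
# concatenation of a 4-digit base with its double: 9327 -> 932718654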
32
0
from ...configuration_utils import PretrainedConfig from ...utils import logging A_ : Any = logging.get_logger(__name__) A_ : Dict = { '''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''', '''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''', } class _lowercase ( UpperCAmelCase__ ): _UpperCAmelCase = '''luke''' def __init__( self : Tuple , __lowerCAmelCase : Dict=5_0267 , __lowerCAmelCase : List[Any]=50_0000 , __lowerCAmelCase : Optional[Any]=768 , __lowerCAmelCase : Union[str, Any]=256 , __lowerCAmelCase : str=12 , __lowerCAmelCase : Dict=12 , __lowerCAmelCase : List[str]=3072 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Union[str, Any]=512 , __lowerCAmelCase : str=2 , __lowerCAmelCase : Any=0.0_2 , __lowerCAmelCase : str=1E-12 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Any=1 , __lowerCAmelCase : Optional[int]=0 , __lowerCAmelCase : Dict=2 , **__lowerCAmelCase : Tuple , ) -> int: """simple docstring""" super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase ) a = vocab_size a = entity_vocab_size a = hidden_size a = entity_emb_size a = num_hidden_layers a = num_attention_heads a = hidden_act a = intermediate_size a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = type_vocab_size a = initializer_range a = layer_norm_eps a = use_entity_aware_attention a = classifier_dropout
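A hedged usage sketch: instantiating the config and a model from it (LukeModel is the companion model class elsewhere in transformers; the overridden values are illustrative):

from transformers import LukeConfig, LukeModel

# build a configuration with the defaults above, overriding a couple of fields
config = LukeConfig(entity_emb_size=256, use_entity_aware_attention=True)
model = LukeModel(config)  # randomly initialised weights
print(config.entity_vocab_size)  # 500000 by default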
700
import os
import tempfile
import unittest

from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property


@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
32
0
import numpy as np

import datasets


_DESCRIPTION = """
Compute the Mahalanobis distance.

The Mahalanobis distance is the distance between a point and a distribution, not between two distinct
points, which makes it effectively a multivariate equivalent of the Euclidean distance.

It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since.
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
"""

_CITATION = """\
@article{de2000mahalanobis,
  title={The mahalanobis distance},
  author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
  journal={Chemometrics and intelligent laboratory systems},
  volume={50},
  number={1},
  pages={1--18},
  year={2000},
  publisher={Elsevier}
}
"""

_KWARGS_DESCRIPTION = """
Args:
    X: List of datapoints to be compared with the `reference_distribution`.
    reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
    mahalanobis: The Mahalanobis distance for each datapoint in `X`.
Examples:

    >>> mahalanobis_metric = datasets.load_metric("mahalanobis")
    >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
    >>> print(results)
    {'mahalanobis': array([0.5])}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mahalanobis(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            # fall back to the pseudo-inverse for singular covariance matrices
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()

        return {"mahalanobis": mahal_dist}
701
from ...configuration_utils import PretrainedConfig from ...utils import logging A_ : Any = logging.get_logger(__name__) A_ : Optional[int] = { '''SCUT-DLVCLab/lilt-roberta-en-base''': ( '''https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json''' ), } class _lowercase ( UpperCAmelCase__ ): _UpperCAmelCase = '''lilt''' def __init__( self : Union[str, Any] , __lowerCAmelCase : Optional[Any]=3_0522 , __lowerCAmelCase : str=768 , __lowerCAmelCase : Any=12 , __lowerCAmelCase : Optional[Any]=12 , __lowerCAmelCase : List[Any]=3072 , __lowerCAmelCase : Dict="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[str]=512 , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : int=0.0_2 , __lowerCAmelCase : Union[str, Any]=1E-12 , __lowerCAmelCase : Tuple=0 , __lowerCAmelCase : List[Any]="absolute" , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : Dict=1024 , **__lowerCAmelCase : Dict , ) -> int: """simple docstring""" super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase ) a = vocab_size a = hidden_size a = num_hidden_layers a = num_attention_heads a = hidden_act a = intermediate_size a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = type_vocab_size a = initializer_range a = layer_norm_eps a = position_embedding_type a = classifier_dropout a = channel_shrink_ratio a = max_ad_position_embeddings
32
0
import argparse import glob import logging import os import sys import time from collections import defaultdict from pathlib import Path from typing import Dict, List, Tuple import numpy as np import pytorch_lightning as pl import torch from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback from torch import nn from torch.utils.data import DataLoader from transformers import MBartTokenizer, TaForConditionalGeneration from transformers.models.bart.modeling_bart import shift_tokens_right from utils import ( ROUGE_KEYS, LegacySeqaSeqDataset, SeqaSeqDataset, assert_all_frozen, calculate_bleu, calculate_rouge, check_output_dir, flatten_list, freeze_embeds, freeze_params, get_git_info, label_smoothed_nll_loss, lmap, pickle_save, save_git_info, save_json, use_task_specific_params, ) # need the parent dir module sys.path.insert(2, str(Path(__file__).resolve().parents[1])) from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa A_ : Any = logging.getLogger(__name__) class _lowercase ( UpperCAmelCase__ ): _UpperCAmelCase = '''summarization''' _UpperCAmelCase = ['''loss'''] _UpperCAmelCase = ROUGE_KEYS _UpperCAmelCase = '''rouge2''' def __init__( self : int , __lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : List[Any] ) -> Tuple: """simple docstring""" if hparams.sortish_sampler and hparams.gpus > 1: a = False elif hparams.max_tokens_per_batch is not None: if hparams.gpus > 1: raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training" ) if hparams.sortish_sampler: raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously" ) super().__init__(__lowerCAmelCase , num_labels=__lowerCAmelCase , mode=self.mode , **__lowerCAmelCase ) use_task_specific_params(self.model , "summarization" ) save_git_info(self.hparams.output_dir ) a = Path(self.output_dir ) / "metrics.json" a = Path(self.output_dir ) / "hparams.pkl" pickle_save(self.hparams , self.hparams_save_path ) a = 0 a = defaultdict(__lowerCAmelCase ) a = self.config.model_type a = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size a = { "data_dir": self.hparams.data_dir, "max_source_length": self.hparams.max_source_length, "prefix": self.model.config.prefix or "", } a = { "train": self.hparams.n_train, "val": self.hparams.n_val, "test": self.hparams.n_test, } a = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()} a = { "train": self.hparams.max_target_length, "val": self.hparams.val_max_target_length, "test": self.hparams.test_max_target_length, } assert self.target_lens["train"] <= self.target_lens["val"], f"""target_lens: {self.target_lens}""" assert self.target_lens["train"] <= self.target_lens["test"], f"""target_lens: {self.target_lens}""" if self.hparams.freeze_embeds: freeze_embeds(self.model ) if self.hparams.freeze_encoder: freeze_params(self.model.get_encoder() ) assert_all_frozen(self.model.get_encoder() ) a = get_git_info()["repo_sha"] a = hparams.num_workers a = None # default to config if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , __lowerCAmelCase ): a = self.tokenizer.lang_code_to_id[hparams.tgt_lang] a = self.decoder_start_token_id a = ( SeqaSeqDataset if hasattr(self.tokenizer , "prepare_seq2seq_batch" ) else LegacySeqaSeqDataset ) a = False a = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams if self.hparams.eval_max_gen_length is not None: a = 
self.hparams.eval_max_gen_length else: a = self.model.config.max_length a = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric def A ( self : Tuple , __lowerCAmelCase : Dict[str, torch.Tensor] ) -> Dict[str, List[str]]: """simple docstring""" a = { k: self.tokenizer.batch_decode(v.tolist() ) if "mask" not in k else v.shape for k, v in batch.items() } save_json(__lowerCAmelCase , Path(self.output_dir ) / "text_batch.json" ) save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / "tok_batch.json" ) a = True return readable_batch def A ( self : str , __lowerCAmelCase : List[str] , **__lowerCAmelCase : Optional[Any] ) -> List[Any]: """simple docstring""" return self.model(__lowerCAmelCase , **__lowerCAmelCase ) def A ( self : Dict , __lowerCAmelCase : List[int] ) -> Optional[Any]: """simple docstring""" a = self.tokenizer.batch_decode( __lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase ) return lmap(str.strip , __lowerCAmelCase ) def A ( self : int , __lowerCAmelCase : dict ) -> Tuple: """simple docstring""" a = self.tokenizer.pad_token_id a , a = batch["input_ids"], batch["attention_mask"] a = batch["labels"] if isinstance(self.model , __lowerCAmelCase ): a = self.model._shift_right(__lowerCAmelCase ) else: a = shift_tokens_right(__lowerCAmelCase , __lowerCAmelCase ) if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero a = decoder_input_ids self.save_readable_batch(__lowerCAmelCase ) a = self(__lowerCAmelCase , attention_mask=__lowerCAmelCase , decoder_input_ids=__lowerCAmelCase , use_cache=__lowerCAmelCase ) a = outputs["logits"] if self.hparams.label_smoothing == 0: # Same behavior as modeling_bart.py, besides ignoring pad_token_id a = nn.CrossEntropyLoss(ignore_index=__lowerCAmelCase ) assert lm_logits.shape[-1] == self.vocab_size a = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) ) else: a = nn.functional.log_softmax(__lowerCAmelCase , dim=-1 ) a , a = label_smoothed_nll_loss( __lowerCAmelCase , __lowerCAmelCase , self.hparams.label_smoothing , ignore_index=__lowerCAmelCase ) return (loss,) @property def A ( self : str ) -> int: """simple docstring""" return self.tokenizer.pad_token_id def A ( self : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : int ) -> Dict: """simple docstring""" a = self._step(__lowerCAmelCase ) a = dict(zip(self.loss_names , __lowerCAmelCase ) ) # tokens per batch a = batch["input_ids"].ne(self.pad ).sum() + batch["labels"].ne(self.pad ).sum() a = batch["input_ids"].shape[0] a = batch["input_ids"].eq(self.pad ).sum() a = batch["input_ids"].eq(self.pad ).float().mean() # TODO(SS): make a wandb summary metric for this return {"loss": loss_tensors[0], "log": logs} def A ( self : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : List[str] ) -> Dict: """simple docstring""" return self._generative_step(__lowerCAmelCase ) def A ( self : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str="val" ) -> Dict: """simple docstring""" self.step_count += 1 a = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names} a = losses["loss"] a = { k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["gen_time", "gen_len"] } a = ( generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric] ) a = torch.tensor(__lowerCAmelCase ).type_as(__lowerCAmelCase ) generative_metrics.update({k: v.item() 
for k, v in losses.items()} ) losses.update(__lowerCAmelCase ) a = {f"""{prefix}_avg_{k}""": x for k, x in losses.items()} a = self.step_count self.metrics[prefix].append(__lowerCAmelCase ) # callback writes this to self.metrics_save_path a = flatten_list([x["preds"] for x in outputs] ) return { "log": all_metrics, "preds": preds, f"""{prefix}_loss""": loss, f"""{prefix}_{self.val_metric}""": metric_tensor, } def A ( self : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] ) -> Dict: """simple docstring""" return calculate_rouge(__lowerCAmelCase , __lowerCAmelCase ) def A ( self : List[str] , __lowerCAmelCase : dict ) -> dict: """simple docstring""" a = time.time() # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens') a = self.model.generate( batch["input_ids"] , attention_mask=batch["attention_mask"] , use_cache=__lowerCAmelCase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , ) a = (time.time() - ta) / batch["input_ids"].shape[0] a = self.ids_to_clean_text(__lowerCAmelCase ) a = self.ids_to_clean_text(batch["labels"] ) a = self._step(__lowerCAmelCase ) a = dict(zip(self.loss_names , __lowerCAmelCase ) ) a = self.calc_generative_metrics(__lowerCAmelCase , __lowerCAmelCase ) a = np.mean(lmap(__lowerCAmelCase , __lowerCAmelCase ) ) base_metrics.update(gen_time=__lowerCAmelCase , gen_len=__lowerCAmelCase , preds=__lowerCAmelCase , target=__lowerCAmelCase , **__lowerCAmelCase ) return base_metrics def A ( self : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : str ) -> Union[str, Any]: """simple docstring""" return self._generative_step(__lowerCAmelCase ) def A ( self : str , __lowerCAmelCase : Optional[int] ) -> Optional[Any]: """simple docstring""" return self.validation_epoch_end(__lowerCAmelCase , prefix="test" ) def A ( self : int , __lowerCAmelCase : List[Any] ) -> SeqaSeqDataset: """simple docstring""" a = self.n_obs[type_path] a = self.target_lens[type_path] a = self.dataset_class( self.tokenizer , type_path=__lowerCAmelCase , n_obs=__lowerCAmelCase , max_target_length=__lowerCAmelCase , **self.dataset_kwargs , ) return dataset def A ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : bool = False ) -> DataLoader: """simple docstring""" a = self.get_dataset(__lowerCAmelCase ) if self.hparams.sortish_sampler and type_path != "test" and type_path != "val": a = dataset.make_sortish_sampler(__lowerCAmelCase , distributed=self.hparams.gpus > 1 ) return DataLoader( __lowerCAmelCase , batch_size=__lowerCAmelCase , collate_fn=dataset.collate_fn , shuffle=__lowerCAmelCase , num_workers=self.num_workers , sampler=__lowerCAmelCase , ) elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val": a = dataset.make_dynamic_sampler( self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 ) return DataLoader( __lowerCAmelCase , batch_sampler=__lowerCAmelCase , collate_fn=dataset.collate_fn , num_workers=self.num_workers , ) else: return DataLoader( __lowerCAmelCase , batch_size=__lowerCAmelCase , collate_fn=dataset.collate_fn , shuffle=__lowerCAmelCase , num_workers=self.num_workers , sampler=__lowerCAmelCase , ) def A ( self : Union[str, Any] ) -> DataLoader: """simple docstring""" a = self.get_dataloader("train" , batch_size=self.hparams.train_batch_size , shuffle=__lowerCAmelCase ) return dataloader def A ( self : Any ) -> DataLoader: """simple docstring""" return 
self.get_dataloader("val" , batch_size=self.hparams.eval_batch_size ) def A ( self : List[Any] ) -> DataLoader: """simple docstring""" return self.get_dataloader("test" , batch_size=self.hparams.eval_batch_size ) @staticmethod def A ( __lowerCAmelCase : str , __lowerCAmelCase : str ) -> Tuple: """simple docstring""" BaseTransformer.add_model_specific_args(__lowerCAmelCase , __lowerCAmelCase ) add_generic_args(__lowerCAmelCase , __lowerCAmelCase ) parser.add_argument( "--max_source_length" , default=1024 , type=__lowerCAmelCase , help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) , ) parser.add_argument( "--max_target_length" , default=56 , type=__lowerCAmelCase , help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) , ) parser.add_argument( "--val_max_target_length" , default=142 , type=__lowerCAmelCase , help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) , ) parser.add_argument( "--test_max_target_length" , default=142 , type=__lowerCAmelCase , help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) , ) parser.add_argument("--freeze_encoder" , action="store_true" ) parser.add_argument("--freeze_embeds" , action="store_true" ) parser.add_argument("--sortish_sampler" , action="store_true" , default=__lowerCAmelCase ) parser.add_argument("--overwrite_output_dir" , action="store_true" , default=__lowerCAmelCase ) parser.add_argument("--max_tokens_per_batch" , type=__lowerCAmelCase , default=__lowerCAmelCase ) parser.add_argument("--logger_name" , type=__lowerCAmelCase , choices=["default", "wandb", "wandb_shared"] , default="default" ) parser.add_argument("--n_train" , type=__lowerCAmelCase , default=-1 , required=__lowerCAmelCase , help="# examples. -1 means use all." ) parser.add_argument("--n_val" , type=__lowerCAmelCase , default=500 , required=__lowerCAmelCase , help="# examples. -1 means use all." ) parser.add_argument("--n_test" , type=__lowerCAmelCase , default=-1 , required=__lowerCAmelCase , help="# examples. -1 means use all." ) parser.add_argument( "--task" , type=__lowerCAmelCase , default="summarization" , required=__lowerCAmelCase , help="# examples. -1 means use all." ) parser.add_argument("--label_smoothing" , type=__lowerCAmelCase , default=0.0 , required=__lowerCAmelCase ) parser.add_argument("--src_lang" , type=__lowerCAmelCase , default="" , required=__lowerCAmelCase ) parser.add_argument("--tgt_lang" , type=__lowerCAmelCase , default="" , required=__lowerCAmelCase ) parser.add_argument("--eval_beams" , type=__lowerCAmelCase , default=__lowerCAmelCase , required=__lowerCAmelCase ) parser.add_argument( "--val_metric" , type=__lowerCAmelCase , default=__lowerCAmelCase , required=__lowerCAmelCase , choices=["bleu", "rouge2", "loss", None] ) parser.add_argument("--eval_max_gen_length" , type=__lowerCAmelCase , default=__lowerCAmelCase , help="never generate more than n tokens" ) parser.add_argument("--save_top_k" , type=__lowerCAmelCase , default=1 , required=__lowerCAmelCase , help="How many checkpoints to save" ) parser.add_argument( "--early_stopping_patience" , type=__lowerCAmelCase , default=-1 , required=__lowerCAmelCase , help=( "-1 means never early stop. 
early_stopping_patience is measured in validation checks, not epochs. So" " val_check_interval will effect it." ) , ) return parser class _lowercase ( UpperCAmelCase__ ): _UpperCAmelCase = '''translation''' _UpperCAmelCase = ['''loss'''] _UpperCAmelCase = ['''bleu'''] _UpperCAmelCase = '''bleu''' def __init__( self : Union[str, Any] , __lowerCAmelCase : str , **__lowerCAmelCase : Tuple ) -> List[str]: """simple docstring""" super().__init__(__lowerCAmelCase , **__lowerCAmelCase ) a = hparams.src_lang a = hparams.tgt_lang def A ( self : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] ) -> dict: """simple docstring""" return calculate_bleu(__lowerCAmelCase , __lowerCAmelCase ) def UpperCAmelCase__ ( UpperCAmelCase__ :Tuple , UpperCAmelCase__ :int=None ): '''simple docstring''' Path(args.output_dir ).mkdir(exist_ok=UpperCAmelCase__ ) check_output_dir(UpperCAmelCase__ , expected_items=3 ) if model is None: if "summarization" in args.task: a = SummarizationModule(UpperCAmelCase__ ) else: a = TranslationModule(UpperCAmelCase__ ) a = Path(args.data_dir ).name if ( args.logger_name == "default" or args.fast_dev_run or str(args.output_dir ).startswith("/tmp" ) or str(args.output_dir ).startswith("/var" ) ): a = True # don't pollute wandb logs unnecessarily elif args.logger_name == "wandb": from pytorch_lightning.loggers import WandbLogger a = os.environ.get("WANDB_PROJECT" , UpperCAmelCase__ ) a = WandbLogger(name=model.output_dir.name , project=UpperCAmelCase__ ) elif args.logger_name == "wandb_shared": from pytorch_lightning.loggers import WandbLogger a = WandbLogger(name=model.output_dir.name , project=F"""hf_{dataset}""" ) if args.early_stopping_patience >= 0: a = get_early_stopping_callback(model.val_metric , args.early_stopping_patience ) else: a = False a = args.val_metric == "loss" a = generic_train( UpperCAmelCase__ , UpperCAmelCase__ , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback( args.output_dir , model.val_metric , args.save_top_k , UpperCAmelCase__ ) , early_stopping_callback=UpperCAmelCase__ , logger=UpperCAmelCase__ , ) pickle_save(model.hparams , model.output_dir / "hparams.pkl" ) if not args.do_predict: return model a = "" a = sorted(glob.glob(os.path.join(args.output_dir , "*.ckpt" ) , recursive=UpperCAmelCase__ ) ) if checkpoints: a = checkpoints[-1] a = checkpoints[-1] trainer.logger.log_hyperparams(model.hparams ) # test() without a model tests using the best checkpoint automatically trainer.test() return model if __name__ == "__main__": A_ : str = argparse.ArgumentParser() A_ : Union[str, Any] = pl.Trainer.add_argparse_args(parser) A_ : Tuple = SummarizationModule.add_model_specific_args(parser, os.getcwd()) A_ : Any = parser.parse_args() main(args)
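The script is driven entirely by argparse; below is a hypothetical invocation. The model and data paths are placeholders, and flags such as --model_name_or_path, --data_dir, and --output_dir are assumed to come from the imported add_generic_args/BaseTransformer helpers; the remaining flags are taken verbatim from the parser definitions above:

# python finetune.py \
#     --model_name_or_path facebook/bart-large \
#     --data_dir ./cnn_dm \
#     --output_dir ./summarization_runs \
#     --task summarization \
#     --max_source_length 1024 --max_target_length 56 \
#     --n_val 500 --val_metric rouge2 \
#     --early_stopping_patience 2 --save_top_k 1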
702
import argparse

from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model from the JSON config
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
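A hypothetical command line for the converter above (the script filename and all three paths are placeholders; the flag names are taken verbatim from the argparse definitions):

# python convert_t5_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path /path/to/t5/model.ckpt \
#     --config_file /path/to/t5/config.json \
#     --pytorch_dump_path /path/to/output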
32
0
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX


def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    # ordinary least squares on [1, date, match] features
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])


def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user, exog=train_match, order=order, seasonal_order=seasonal_order
    )
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]


def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]


def interquartile_range_checker(train_user: list) -> float:
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim


def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe


if __name__ == "__main__":
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=["total_user", "total_even", "days"]
    )

    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data (pass the scalar, matching the signature)
    not_str = "" if data_safety_checker(res_vote, tst_user[0]) else "not "
    print(f"Today's data is {not_str}safe.")
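A worked example of the voting rule in data_safety_checker: a vote counts as safe only when it does not exceed the actual value and its magnitude is within 0.1 of it:

votes = [0.5, 0.45, 2.0]
actual = 0.48
# 0.5  > 0.48                        -> not_safe
# 0.45 <= 0.48, |0.45 - 0.48| <= 0.1 -> safe
# 2.0  > 0.48                        -> not_safe
print(data_safety_checker(votes, actual))  # False (1 safe vs 2 not_safe)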
703
def binary_and(a: int, b: int) -> str:
    """
    Return the bitwise AND of two non-negative integers as a binary string.
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
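Two checks worked out by hand (both inputs are zero-padded to the longer binary width before the digit-wise AND):

print(binary_and(25, 32))  # '0b000000'  (011001 & 100000)
print(binary_and(37, 50))  # '0b100000'  (100101 & 110010)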
32
0
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPanoramaPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() @skip_mps class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ): _UpperCAmelCase = StableDiffusionPanoramaPipeline _UpperCAmelCase = TEXT_TO_IMAGE_PARAMS _UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS _UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS _UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS def A ( self : Tuple ) -> List[str]: """simple docstring""" torch.manual_seed(0 ) a = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) a = DDIMScheduler() torch.manual_seed(0 ) a = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) torch.manual_seed(0 ) a = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) a = CLIPTextModel(__lowerCAmelCase ) a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) a = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def A ( self : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict=0 ) -> Optional[int]: """simple docstring""" a = torch.manual_seed(__lowerCAmelCase ) a = { "prompt": "a photo of the dolomites", "generator": generator, # Setting height and width to None to prevent OOMs on CPU. 
"height": None, "width": None, "num_inference_steps": 1, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def A ( self : Optional[int] ) -> str: """simple docstring""" a = "cpu" # ensure determinism for the device-dependent torch.Generator a = self.get_dummy_components() a = StableDiffusionPanoramaPipeline(**__lowerCAmelCase ) a = sd_pipe.to(__lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase ) a = self.get_dummy_inputs(__lowerCAmelCase ) a = sd_pipe(**__lowerCAmelCase ).images a = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) a = np.array([0.6_1_8_6, 0.5_3_7_4, 0.4_9_1_5, 0.4_1_3_5, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_7, 0.4_7_5_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def A ( self : str ) -> Dict: """simple docstring""" super().test_inference_batch_consistent(batch_sizes=[1, 2] ) def A ( self : Any ) -> Optional[Any]: """simple docstring""" super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25E-3 ) def A ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" a = "cpu" # ensure determinism for the device-dependent torch.Generator a = self.get_dummy_components() a = StableDiffusionPanoramaPipeline(**__lowerCAmelCase ) a = sd_pipe.to(__lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase ) a = self.get_dummy_inputs(__lowerCAmelCase ) a = "french fries" a = sd_pipe(**__lowerCAmelCase , negative_prompt=__lowerCAmelCase ) a = output.images a = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) a = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def A ( self : List[str] ) -> str: """simple docstring""" a = "cpu" # ensure determinism for the device-dependent torch.Generator a = self.get_dummy_components() a = StableDiffusionPanoramaPipeline(**__lowerCAmelCase ) a = sd_pipe.to(__lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase ) a = self.get_dummy_inputs(__lowerCAmelCase ) a = sd_pipe(**__lowerCAmelCase , view_batch_size=2 ) a = output.images a = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) a = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def A ( self : str ) -> Any: """simple docstring""" a = "cpu" # ensure determinism for the device-dependent torch.Generator a = self.get_dummy_components() a = EulerAncestralDiscreteScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" ) a = StableDiffusionPanoramaPipeline(**__lowerCAmelCase ) a = sd_pipe.to(__lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase ) a = self.get_dummy_inputs(__lowerCAmelCase ) a = sd_pipe(**__lowerCAmelCase ).images a = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) a = np.array([0.4_0_2_4, 0.6_5_1_0, 0.4_9_0_1, 0.5_3_7_8, 0.5_8_1_3, 0.5_6_2_2, 0.4_7_9_5, 0.4_4_6_7, 0.4_9_5_2] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def A ( self : List[str] ) -> List[Any]: """simple docstring""" a = "cpu" # ensure determinism for the device-dependent torch.Generator a = self.get_dummy_components() a = PNDMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , skip_prk_steps=__lowerCAmelCase ) a = StableDiffusionPanoramaPipeline(**__lowerCAmelCase ) a = 
sd_pipe.to(__lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase ) a = self.get_dummy_inputs(__lowerCAmelCase ) a = sd_pipe(**__lowerCAmelCase ).images a = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) a = np.array([0.6_3_9_1, 0.6_2_9_1, 0.4_8_6_1, 0.5_1_3_4, 0.5_5_5_2, 0.4_5_7_8, 0.5_0_3_2, 0.5_0_2_3, 0.4_5_3_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class _lowercase ( unittest.TestCase ): def A ( self : List[str] ) -> Union[str, Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def A ( self : Optional[Any] , __lowerCAmelCase : List[str]=0 ) -> Tuple: """simple docstring""" a = torch.manual_seed(__lowerCAmelCase ) a = { "prompt": "a photo of the dolomites", "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def A ( self : Tuple ) -> Union[str, Any]: """simple docstring""" a = "stabilityai/stable-diffusion-2-base" a = DDIMScheduler.from_pretrained(__lowerCAmelCase , subfolder="scheduler" ) a = StableDiffusionPanoramaPipeline.from_pretrained(__lowerCAmelCase , scheduler=__lowerCAmelCase , safety_checker=__lowerCAmelCase ) pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) pipe.enable_attention_slicing() a = self.get_inputs() a = pipe(**__lowerCAmelCase ).images a = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2048, 3) a = np.array( [ 0.3_6_9_6_8_3_9_2, 0.2_7_0_2_5_3_7_2, 0.3_2_4_4_6_7_6_6, 0.2_8_3_7_9_3_8_7, 0.3_6_3_6_3_2_7_4, 0.3_0_7_3_3_3_4_7, 0.2_7_1_0_0_0_2_7, 0.2_7_0_5_4_1_2_5, 0.2_5_5_3_6_0_9_6, ] ) assert np.abs(expected_slice - image_slice ).max() < 1E-2 def A ( self : Optional[int] ) -> List[Any]: """simple docstring""" a = StableDiffusionPanoramaPipeline.from_pretrained( "stabilityai/stable-diffusion-2-base" , safety_checker=__lowerCAmelCase ) a = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) pipe.enable_attention_slicing() a = self.get_inputs() a = pipe(**__lowerCAmelCase ).images a = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2048, 3) a = np.array( [ [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ] ] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def A ( self : Dict ) -> int: """simple docstring""" a = 0 def callback_fn(__lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : torch.FloatTensor ) -> None: a = True nonlocal number_of_steps number_of_steps += 1 if step == 1: a = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) a = latents[0, -3:, -3:, -1] a = np.array( [ 0.1_8_6_8_1_8_6_9, 0.3_3_9_0_7_8_1_6, 0.5_3_6_1_2_7_6, 0.1_4_4_3_2_8_6_5, -0.0_2_8_5_6_6_1_1, -0.7_3_9_4_1_1_2_3, 0.2_3_3_9_7_9_8_7, 0.4_7_3_2_2_6_8_2, -0.3_7_8_2_3_1_6_4, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 elif step == 2: a = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) a = latents[0, -3:, -3:, -1] a = np.array( [ 0.1_8_5_3_9_6_4_5, 0.3_3_9_8_7_2_4_8, 0.5_3_7_8_5_5_9, 0.1_4_4_3_7_1_4_2, -0.0_2_4_5_5_2_6_1, -0.7_3_3_8_3_1_7, 0.2_3_9_9_0_7_5_5, 0.4_7_3_5_6_2_7_2, -0.3_7_8_6_5_0_5, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 a = False a = "stabilityai/stable-diffusion-2-base" a = DDIMScheduler.from_pretrained(__lowerCAmelCase , subfolder="scheduler" ) a = StableDiffusionPanoramaPipeline.from_pretrained(__lowerCAmelCase , 
scheduler=__lowerCAmelCase , safety_checker=__lowerCAmelCase ) a = pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) pipe.enable_attention_slicing() a = self.get_inputs() pipe(**__lowerCAmelCase , callback=__lowerCAmelCase , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def A ( self : List[str] ) -> Optional[Any]: """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() a = "stabilityai/stable-diffusion-2-base" a = DDIMScheduler.from_pretrained(__lowerCAmelCase , subfolder="scheduler" ) a = StableDiffusionPanoramaPipeline.from_pretrained(__lowerCAmelCase , scheduler=__lowerCAmelCase , safety_checker=__lowerCAmelCase ) a = pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() a = self.get_inputs() a = pipe(**__lowerCAmelCase ) a = torch.cuda.max_memory_allocated() # make sure that less than 5.2 GB is allocated assert mem_bytes < 5.5 * 10**9
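A hedged end-to-end sketch distilled from the slow tests above; the checkpoint id, scheduler subfolder, and prompt come directly from the test code, while the CUDA device and output filename are assumptions:

import torch

from diffusers import DDIMScheduler, StableDiffusionPanoramaPipeline

model_ckpt = "stabilityai/stable-diffusion-2-base"
scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler)
pipe = pipe.to("cuda")  # assumes a GPU is available

generator = torch.manual_seed(0)
image = pipe("a photo of the dolomites", generator=generator).images[0]
image.save("dolomites_panorama.png")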
704
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # iterate in descending order and insert at the head -> ascending list
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
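A quick interactive check of the merge (the lists come out ascending because the constructor inserts a descending sort at the head):

odd = SortedLinkedList(test_data_odd)
print(odd)       # -11 -> -1 -> 0 -> 1 -> 3 -> 5 -> 7 -> 9
print(len(odd))  # 8
print(merge_lists(odd, SortedLinkedList(test_data_even)))
# -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10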
32
0
import unittest

from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow


if is_flax_available():
    import jax.numpy as jnp

    from transformers import FlaxXLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
705
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
32
0
import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class _lowercase ( unittest.TestCase ): def A ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" super().tearDown() gc.collect() def A ( self : Tuple ) -> Optional[Any]: """simple docstring""" a , a = FlaxStableDiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-2" , revision="bf16" , dtype=jnp.bfloataa , ) a = "A painting of a squirrel eating a burger" a = jax.device_count() a = num_samples * [prompt] a = sd_pipe.prepare_inputs(__lowerCAmelCase ) a = replicate(__lowerCAmelCase ) a = shard(__lowerCAmelCase ) a = jax.random.PRNGKey(0 ) a = jax.random.split(__lowerCAmelCase , jax.device_count() ) a = sd_pipe(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , num_inference_steps=25 , jit=__lowerCAmelCase )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) a = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) a = images[0, 253:256, 253:256, -1] a = jnp.asarray(jax.device_get(image_slice.flatten() ) ) a = jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 def A ( self : List[Any] ) -> Dict: """simple docstring""" a = "stabilityai/stable-diffusion-2" a , a = FlaxDPMSolverMultistepScheduler.from_pretrained(__lowerCAmelCase , subfolder="scheduler" ) a , a = FlaxStableDiffusionPipeline.from_pretrained( __lowerCAmelCase , scheduler=__lowerCAmelCase , revision="bf16" , dtype=jnp.bfloataa , ) a = scheduler_params a = "A painting of a squirrel eating a burger" a = jax.device_count() a = num_samples * [prompt] a = sd_pipe.prepare_inputs(__lowerCAmelCase ) a = replicate(__lowerCAmelCase ) a = shard(__lowerCAmelCase ) a = jax.random.PRNGKey(0 ) a = jax.random.split(__lowerCAmelCase , jax.device_count() ) a = sd_pipe(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , num_inference_steps=25 , jit=__lowerCAmelCase )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) a = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) a = images[0, 253:256, 253:256, -1] a = jnp.asarray(jax.device_get(image_slice.flatten() ) ) a = jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
706
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices A_ : int = logging.get_logger(__name__) A_ : str = { '''microsoft/focalnet-tiny''': '''https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json''', } class _lowercase ( UpperCAmelCase__, UpperCAmelCase__ ): _UpperCAmelCase = '''focalnet''' def __init__( self : int , __lowerCAmelCase : Optional[Any]=224 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : Any=3 , __lowerCAmelCase : Tuple=96 , __lowerCAmelCase : Dict=False , __lowerCAmelCase : Optional[int]=[192, 384, 768, 768] , __lowerCAmelCase : Union[str, Any]=[2, 2, 6, 2] , __lowerCAmelCase : Optional[int]=[2, 2, 2, 2] , __lowerCAmelCase : Union[str, Any]=[3, 3, 3, 3] , __lowerCAmelCase : str="gelu" , __lowerCAmelCase : Any=4.0 , __lowerCAmelCase : Optional[int]=0.0 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : str=False , __lowerCAmelCase : Optional[int]=1E-4 , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : str=False , __lowerCAmelCase : Any=0.0_2 , __lowerCAmelCase : str=1E-5 , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Dict=None , __lowerCAmelCase : str=None , **__lowerCAmelCase : Any , ) -> List[str]: """simple docstring""" super().__init__(**__lowerCAmelCase ) a = image_size a = patch_size a = num_channels a = embed_dim a = use_conv_embed a = hidden_sizes a = depths a = focal_levels a = focal_windows a = hidden_act a = mlp_ratio a = hidden_dropout_prob a = drop_path_rate a = use_layerscale a = layerscale_value a = use_post_layernorm a = use_post_layernorm_in_modulation a = normalize_modulator a = initializer_range a = layer_norm_eps a = encoder_stride a = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )] a , a = get_aligned_output_features_output_indices( out_features=__lowerCAmelCase , out_indices=__lowerCAmelCase , stage_names=self.stage_names )
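A hedged usage sketch; FocalNetModel is the companion model class in transformers, and the overridden values below are illustrative:

from transformers import FocalNetConfig, FocalNetModel

config = FocalNetConfig(embed_dim=96, depths=[2, 2, 6, 2], focal_levels=[2, 2, 2, 2])
model = FocalNetModel(config)  # randomly initialised
# the backbone mixin exposes named stages for feature extraction
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']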
32
0
import argparse

from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model from the JSON config
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
707
def is_palindrome(head):
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head):
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True


def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
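None of the three checkers defines the node type it traverses; below is a minimal stand-in and smoke test (the ListNode name and the builder are assumptions):

class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None


def build_list(values):
    # build a singly linked list from an iterable and return its head
    head = tail = None
    for v in values:
        node = ListNode(v)
        if head is None:
            head = tail = node
        else:
            tail.next = node
            tail = node
    return head


assert is_palindrome_stack(build_list([1, 2, 2, 1]))
assert not is_palindrome_stack(build_list([1, 2, 3]))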
32
0
from pathlib import Path

import numpy as np
from PIL import Image


def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """
    Return gray image from rgb image.
    """
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """
    Return binary image from gray image.
    """
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """
    Return the binary dilation of `image` by the structuring element `kernel`.
    """
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )
    # Copy image to padded image, centered under the kernel
    image_padded[
        kernel.shape[0] // 2 : kernel.shape[0] // 2 + image.shape[0],
        kernel.shape[1] // 2 : kernel.shape[1] // 2 + image.shape[1],
    ] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output


if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
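A tiny worked example of the dilation: a single set pixel grows into the shape of the cross-shaped structuring element:

cross = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
dot = np.zeros((3, 3), dtype=int)
dot[1, 1] = 1
print(dilation(dot, cross))
# [[0 1 0]
#  [1 1 1]
#  [0 1 0]]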
708
import unittest from transformers import MobileBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertModel, ) class _lowercase : def __init__( self : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any=13 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : int=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : str=99 , __lowerCAmelCase : List[str]=64 , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Dict=5 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Optional[Any]=37 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : List[str]=512 , __lowerCAmelCase : List[Any]=16 , __lowerCAmelCase : Union[str, Any]=2 , __lowerCAmelCase : Optional[Any]=0.0_2 , __lowerCAmelCase : Dict=3 , __lowerCAmelCase : Optional[int]=4 , __lowerCAmelCase : Union[str, Any]=None , ) -> List[str]: """simple docstring""" a = parent a = batch_size a = seq_length a = is_training a = use_input_mask a = use_token_type_ids a = use_labels a = vocab_size a = hidden_size a = embedding_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = type_vocab_size a = type_sequence_label_size a = initializer_range a = num_labels a = num_choices a = scope def A ( self : Optional[int] ) -> Optional[int]: """simple docstring""" a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a = None if self.use_input_mask: a = random_attention_mask([self.batch_size, self.seq_length] ) a = None if self.use_token_type_ids: a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a = None a = None a = None if self.use_labels: a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a = ids_tensor([self.batch_size] , self.num_choices ) a = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A ( self : int ) -> List[str]: """simple docstring""" return MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , ) def A ( self : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : int , __lowerCAmelCase : Dict , 
__lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict ) -> Union[str, Any]: """simple docstring""" a = MobileBertModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) a = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) a = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def A ( self : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Any ) -> str: """simple docstring""" a = MobileBertForMaskedLM(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A ( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] ) -> List[str]: """simple docstring""" a = MobileBertForNextSentencePrediction(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def A ( self : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] ) -> List[Any]: """simple docstring""" a = MobileBertForPreTraining(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , next_sentence_label=__lowerCAmelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def A ( self : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ) -> Any: """simple docstring""" a = MobileBertForQuestionAnswering(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A ( self : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] ) -> Optional[int]: """simple docstring""" a = 
self.num_labels a = MobileBertForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ) -> Optional[Any]: """simple docstring""" a = self.num_labels a = MobileBertForTokenClassification(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A ( self : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] ) -> List[str]: """simple docstring""" a = self.num_choices a = MobileBertForMultipleChoice(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A ( self : List[Any] ) -> Dict: """simple docstring""" a = self.prepare_config_and_inputs() ( ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ) = config_and_inputs a = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ): _UpperCAmelCase = ( ( MobileBertModel, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, ) if is_torch_available() else () ) _UpperCAmelCase = ( { '''feature-extraction''': MobileBertModel, '''fill-mask''': MobileBertForMaskedLM, '''question-answering''': MobileBertForQuestionAnswering, '''text-classification''': MobileBertForSequenceClassification, '''token-classification''': MobileBertForTokenClassification, '''zero-shot''': MobileBertForSequenceClassification, } if is_torch_available() else {} ) _UpperCAmelCase = True def A ( self : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any=False ) -> Any: """simple docstring""" a = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) if return_labels: if model_class in get_values(__lowerCAmelCase ): a = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCAmelCase ) a = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase ) return inputs_dict def A ( self : Optional[int] ) -> List[Any]: """simple docstring""" a = MobileBertModelTester(self ) a = ConfigTester(self , 
config_class=__lowerCAmelCase , hidden_size=37 ) def A ( self : int ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() def A ( self : str ) -> Dict: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*__lowerCAmelCase ) def A ( self : str ) -> str: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*__lowerCAmelCase ) def A ( self : List[str] ) -> Dict: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__lowerCAmelCase ) def A ( self : int ) -> Dict: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__lowerCAmelCase ) def A ( self : List[Any] ) -> int: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*__lowerCAmelCase ) def A ( self : List[Any] ) -> Dict: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*__lowerCAmelCase ) def A ( self : List[Any] ) -> Optional[int]: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__lowerCAmelCase ) def A ( self : int ) -> Tuple: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*__lowerCAmelCase ) def UpperCAmelCase__ ( UpperCAmelCase__ :Dict ): '''simple docstring''' return torch.tensor( UpperCAmelCase__ , dtype=torch.long , device=UpperCAmelCase__ , ) A_ : Dict = 1E-3 @require_torch @require_sentencepiece @require_tokenizers class _lowercase ( unittest.TestCase ): @slow def A ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" a = MobileBertModel.from_pretrained("google/mobilebert-uncased" ).to(__lowerCAmelCase ) a = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] ) with torch.no_grad(): a = model(__lowerCAmelCase )[0] a = torch.Size((1, 9, 512) ) self.assertEqual(output.shape , __lowerCAmelCase ) a = torch.tensor( [ [ [-2.4_73_65_26E07, 8.2_69_16_56E04, 1.6_52_18_38E05], [-5.7_54_17_04E-01, 3.9_05_60_22E00, 4.4_01_15_07E00], [2.6_04_73_59E00, 1.5_67_76_52E00, -1.7_32_41_88E-01], ] ] , device=__lowerCAmelCase , ) # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a # ~1 difference, it's therefore not a good idea to measure using addition. # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE a = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE ) a = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE ) self.assertTrue(lower_bound and upper_bound )
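The integration test above compares logits by ratio rather than by absolute difference because MobileBERT activations span roughly 1e0 to 1e8. A minimal standalone sketch of that tolerance check (the tensors are illustrative, not real model outputs):

import torch

# With values this spread out, an absolute difference is meaningless, so we
# check that expected / actual stays within [1 - tol, 1 + tol] element-wise.
TOLERANCE = 1e-3
expected = torch.tensor([1.0e8, 3.9, -0.17])
actual = expected * (1 + 5e-4)  # simulate a small relative deviation
ratio = expected / actual
assert torch.all(ratio >= 1 - TOLERANCE) and torch.all(ratio <= 1 + TOLERANCE)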
32
0
from __future__ import annotations

from math import pow, sqrt


def UpperCAmelCase__ ( UpperCAmelCase__ :float , UpperCAmelCase__ :float , UpperCAmelCase__ :float ):
    '''simple docstring'''
    if (resistance, reactance, impedance).count(0 ) != 1:
        raise ValueError("One and only one argument must be 0" )
    if resistance == 0:
        return {"resistance": sqrt(pow(UpperCAmelCase__ , 2 ) - pow(UpperCAmelCase__ , 2 ) )}
    elif reactance == 0:
        return {"reactance": sqrt(pow(UpperCAmelCase__ , 2 ) - pow(UpperCAmelCase__ , 2 ) )}
    elif impedance == 0:
        return {"impedance": sqrt(pow(UpperCAmelCase__ , 2 ) + pow(UpperCAmelCase__ , 2 ) )}
    else:
        raise ValueError("Exactly one argument must be 0" )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
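A worked check of the relationship the helper above encodes: for a series circuit the impedance Z satisfies Z**2 = R**2 + X**2, so any one of the three quantities can be recovered from the other two.

from math import sqrt

R, X = 3.0, 4.0
Z = sqrt(R**2 + X**2)
print(Z)                  # 5.0
print(sqrt(Z**2 - X**2))  # 3.0, recovering the resistance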
709
import re from pathlib import Path from unittest import TestCase import pytest @pytest.mark.integration class _lowercase ( UpperCAmelCase__ ): def A ( self : Optional[int] , __lowerCAmelCase : str ) -> Union[str, Any]: """simple docstring""" with open(__lowerCAmelCase , encoding="utf-8" ) as input_file: a = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" ) a = input_file.read() a = regexp.search(__lowerCAmelCase ) return match def A ( self : List[Any] , __lowerCAmelCase : str ) -> Dict: """simple docstring""" with open(__lowerCAmelCase , encoding="utf-8" ) as input_file: a = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL ) a = input_file.read() # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search` a = regexp.finditer(__lowerCAmelCase ) a = [match for match in matches if match is not None and match.group(1 ) is not None] return matches[0] if matches else None def A ( self : List[str] ) -> List[Any]: """simple docstring""" a = Path("./datasets" ) a = list(dataset_paths.absolute().glob("**/*.py" ) ) for dataset in dataset_files: if self._no_encoding_on_file_open(str(__lowerCAmelCase ) ): raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""" ) def A ( self : Tuple ) -> Union[str, Any]: """simple docstring""" a = Path("./datasets" ) a = list(dataset_paths.absolute().glob("**/*.py" ) ) for dataset in dataset_files: if self._no_print_statements(str(__lowerCAmelCase ) ): raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
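A standalone sketch of the encoding lint in the first test above, reusing its regex verbatim: open() calls with no encoding argument and no binary/explicit mode token are flagged (the example strings are illustrative):

import re

pattern = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
print(bool(pattern.search(' open("data.txt")')))                     # True  -> flagged
print(bool(pattern.search(' open("data.txt", encoding="utf-8")')))  # False -> ok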
32
0
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


A_ : List[str] = logging.get_logger(__name__)

A_ : Dict = {
    '''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class _lowercase ( UpperCAmelCase__ ):
    _UpperCAmelCase = '''levit'''

    def __init__( self : str , __lowerCAmelCase : List[str]=224 , __lowerCAmelCase : int=3 , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Union[str, Any]=16 , __lowerCAmelCase : Tuple=[128, 256, 384] , __lowerCAmelCase : Union[str, Any]=[4, 8, 12] , __lowerCAmelCase : List[str]=[4, 4, 4] , __lowerCAmelCase : Any=[16, 16, 16] , __lowerCAmelCase : Tuple=0 , __lowerCAmelCase : Any=[2, 2, 2] , __lowerCAmelCase : Tuple=[2, 2, 2] , __lowerCAmelCase : Dict=0.0_2 , **__lowerCAmelCase : int , ) -> Dict:
        """simple docstring"""
        super().__init__(**__lowerCAmelCase )
        a = image_size
        a = num_channels
        a = kernel_size
        a = stride
        a = padding
        a = hidden_sizes
        a = num_attention_heads
        a = depths
        a = key_dim
        a = drop_path_rate
        a = patch_size
        a = attention_ratio
        a = mlp_ratio
        a = initializer_range
        a = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class _lowercase ( UpperCAmelCase__ ):
    _UpperCAmelCase = version.parse('''1.11''' )

    @property
    def A ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )

    @property
    def A ( self : str ) -> float:
        """simple docstring"""
        return 1E-4
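For reference, this file mirrors the upstream transformers LevitConfig (class names here are dataset-obfuscated). Assuming that library is installed, the equivalent config is built like this:

from transformers import LevitConfig

config = LevitConfig(image_size=224, hidden_sizes=[128, 256, 384])
print(config.model_type)  # 'levit'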
710
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


A_ : Optional[int] = {
    '''configuration_instructblip''': [
        '''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''InstructBlipConfig''',
        '''InstructBlipQFormerConfig''',
        '''InstructBlipVisionConfig''',
    ],
    '''processing_instructblip''': ['''InstructBlipProcessor'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A_ : List[Any] = [
        '''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''InstructBlipQFormerModel''',
        '''InstructBlipPreTrainedModel''',
        '''InstructBlipForConditionalGeneration''',
        '''InstructBlipVisionModel''',
    ]

if TYPE_CHECKING:
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )

else:
    import sys

    A_ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
32
0
from collections.abc import Iterable from typing import Any class _lowercase : def __init__( self : Optional[int] , __lowerCAmelCase : int | None = None ) -> Optional[Any]: """simple docstring""" a = value a = None # Added in order to delete a node easier a = None a = None def __repr__( self : str ) -> str: """simple docstring""" from pprint import pformat if self.left is None and self.right is None: return str(self.value ) return pformat({f"""{self.value}""": (self.left, self.right)} , indent=1 ) class _lowercase : def __init__( self : int , __lowerCAmelCase : Node | None = None ) -> str: """simple docstring""" a = root def __str__( self : str ) -> str: """simple docstring""" return str(self.root ) def A ( self : Any , __lowerCAmelCase : Node , __lowerCAmelCase : Node | None ) -> None: """simple docstring""" if new_children is not None: # reset its kids a = node.parent if node.parent is not None: # reset its parent if self.is_right(__lowerCAmelCase ): # If it is the right children a = new_children else: a = new_children else: a = new_children def A ( self : Tuple , __lowerCAmelCase : Node ) -> bool: """simple docstring""" if node.parent and node.parent.right: return node == node.parent.right return False def A ( self : str ) -> bool: """simple docstring""" return self.root is None def A ( self : int , __lowerCAmelCase : List[Any] ) -> None: """simple docstring""" a = Node(__lowerCAmelCase ) # create a new Node if self.empty(): # if Tree is empty a = new_node # set its root else: # Tree is not empty a = self.root # from root if parent_node is None: return while True: # While we don't get to a leaf if value < parent_node.value: # We go left if parent_node.left is None: a = new_node # We insert the new node in a leaf break else: a = parent_node.left else: if parent_node.right is None: a = new_node break else: a = parent_node.right a = parent_node def A ( self : Any , *__lowerCAmelCase : Any ) -> None: """simple docstring""" for value in values: self.__insert(__lowerCAmelCase ) def A ( self : Tuple , __lowerCAmelCase : List[str] ) -> Node | None: """simple docstring""" if self.empty(): raise IndexError("Warning: Tree is empty! please use another." 
) else: a = self.root # use lazy evaluation here to avoid NoneType Attribute error while node is not None and node.value is not value: a = node.left if value < node.value else node.right return node def A ( self : Dict , __lowerCAmelCase : Node | None = None ) -> Node | None: """simple docstring""" if node is None: if self.root is None: return None a = self.root if not self.empty(): while node.right is not None: a = node.right return node def A ( self : Tuple , __lowerCAmelCase : Node | None = None ) -> Node | None: """simple docstring""" if node is None: a = self.root if self.root is None: return None if not self.empty(): a = self.root while node.left is not None: a = node.left return node def A ( self : Any , __lowerCAmelCase : int ) -> None: """simple docstring""" a = self.search(__lowerCAmelCase ) # Look for the node with that label if node is not None: if node.left is None and node.right is None: # If it has no children self.__reassign_nodes(__lowerCAmelCase , __lowerCAmelCase ) elif node.left is None: # Has only right children self.__reassign_nodes(__lowerCAmelCase , node.right ) elif node.right is None: # Has only left children self.__reassign_nodes(__lowerCAmelCase , node.left ) else: a = self.get_max( node.left ) # Gets the max value of the left branch self.remove(tmp_node.value ) # type: ignore a = ( tmp_node.value # type: ignore ) # Assigns the value to the node to delete and keep tree structure def A ( self : Any , __lowerCAmelCase : Node | None ) -> Iterable: """simple docstring""" if node is not None: yield node # Preorder Traversal yield from self.preorder_traverse(node.left ) yield from self.preorder_traverse(node.right ) def A ( self : Dict , __lowerCAmelCase : Tuple=None ) -> Any: """simple docstring""" if traversal_function is None: return self.preorder_traverse(self.root ) else: return traversal_function(self.root ) def A ( self : str , __lowerCAmelCase : list , __lowerCAmelCase : Node | None ) -> None: """simple docstring""" if node: self.inorder(__lowerCAmelCase , node.left ) arr.append(node.value ) self.inorder(__lowerCAmelCase , node.right ) def A ( self : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Node ) -> int: """simple docstring""" a = [] self.inorder(__lowerCAmelCase , __lowerCAmelCase ) # append all values to list using inorder traversal return arr[k - 1] def UpperCAmelCase__ ( UpperCAmelCase__ :Node | None ): '''simple docstring''' a = [] if curr_node is not None: a = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node] return node_list def UpperCAmelCase__ ( ): '''simple docstring''' a = (8, 3, 6, 1, 10, 14, 13, 4, 7) a = BinarySearchTree() for i in testlist: t.insert(UpperCAmelCase__ ) # Prints all the elements of the list in order traversal print(UpperCAmelCase__ ) if t.search(6 ) is not None: print("The value 6 exists" ) else: print("The value 6 doesn't exist" ) if t.search(-1 ) is not None: print("The value -1 exists" ) else: print("The value -1 doesn't exist" ) if not t.empty(): print("Max Value: " , t.get_max().value ) # type: ignore print("Min Value: " , t.get_min().value ) # type: ignore for i in testlist: t.remove(UpperCAmelCase__ ) print(UpperCAmelCase__ ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
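A minimal standalone sketch of the k-th smallest lookup the tree above implements: an inorder traversal of a BST visits keys in sorted order, so the k-th element of that traversal is the answer (plain tuples stand in for the Node objects):

def inorder(node, out):
    # node is (left, value, right) or None
    if node is not None:
        left, value, right = node
        inorder(left, out)
        out.append(value)
        inorder(right, out)

tree = ((None, 1, None), 3, ((None, 4, None), 6, None))  # BST over {1, 3, 4, 6}
acc: list = []
inorder(tree, acc)
print(acc[3 - 1])  # 3rd smallest -> 4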
711
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class _lowercase ( UpperCAmelCase__ ): _UpperCAmelCase = (UniPCMultistepScheduler,) _UpperCAmelCase = (('''num_inference_steps''', 25),) def A ( self : List[Any] , **__lowerCAmelCase : Optional[int] ) -> int: """simple docstring""" a = { "num_train_timesteps": 1000, "beta_start": 0.0_0_0_1, "beta_end": 0.0_2, "beta_schedule": "linear", "solver_order": 2, "solver_type": "bh2", } config.update(**__lowerCAmelCase ) return config def A ( self : List[Any] , __lowerCAmelCase : Optional[int]=0 , **__lowerCAmelCase : Optional[Any] ) -> int: """simple docstring""" a = dict(self.forward_default_kwargs ) a = kwargs.pop("num_inference_steps" , __lowerCAmelCase ) a = self.dummy_sample a = 0.1 * sample a = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config(**__lowerCAmelCase ) a = scheduler_class(**__lowerCAmelCase ) scheduler.set_timesteps(__lowerCAmelCase ) # copy over dummy past residuals a = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowerCAmelCase ) a = scheduler_class.from_pretrained(__lowerCAmelCase ) new_scheduler.set_timesteps(__lowerCAmelCase ) # copy over dummy past residuals a = dummy_past_residuals[: new_scheduler.config.solver_order] a , a = sample, sample for t in range(__lowerCAmelCase , time_step + scheduler.config.solver_order + 1 ): a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample a = new_scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def A ( self : List[Any] , __lowerCAmelCase : Optional[Any]=0 , **__lowerCAmelCase : List[Any] ) -> List[str]: """simple docstring""" a = dict(self.forward_default_kwargs ) a = kwargs.pop("num_inference_steps" , __lowerCAmelCase ) a = self.dummy_sample a = 0.1 * sample a = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config() a = scheduler_class(**__lowerCAmelCase ) scheduler.set_timesteps(__lowerCAmelCase ) # copy over dummy past residuals (must be after setting timesteps) a = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowerCAmelCase ) a = scheduler_class.from_pretrained(__lowerCAmelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(__lowerCAmelCase ) # copy over dummy past residual (must be after setting timesteps) a = dummy_past_residuals[: new_scheduler.config.solver_order] a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample a = new_scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def A ( self : str , __lowerCAmelCase : Any=None , **__lowerCAmelCase : List[str] ) -> Any: """simple docstring""" if scheduler is None: a = self.scheduler_classes[0] a = self.get_scheduler_config(**__lowerCAmelCase ) a = scheduler_class(**__lowerCAmelCase ) a = self.scheduler_classes[0] a = 
self.get_scheduler_config(**__lowerCAmelCase ) a = scheduler_class(**__lowerCAmelCase ) a = 10 a = self.dummy_model() a = self.dummy_sample_deter scheduler.set_timesteps(__lowerCAmelCase ) for i, t in enumerate(scheduler.timesteps ): a = model(__lowerCAmelCase , __lowerCAmelCase ) a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample return sample def A ( self : Any ) -> int: """simple docstring""" a = dict(self.forward_default_kwargs ) a = kwargs.pop("num_inference_steps" , __lowerCAmelCase ) for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config() a = scheduler_class(**__lowerCAmelCase ) a = self.dummy_sample a = 0.1 * sample if num_inference_steps is not None and hasattr(__lowerCAmelCase , "set_timesteps" ): scheduler.set_timesteps(__lowerCAmelCase ) elif num_inference_steps is not None and not hasattr(__lowerCAmelCase , "set_timesteps" ): a = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) a = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] a = dummy_past_residuals[: scheduler.config.solver_order] a = scheduler.timesteps[5] a = scheduler.timesteps[6] a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def A ( self : List[str] ) -> Dict: """simple docstring""" a = UniPCMultistepScheduler(**self.get_scheduler_config() ) a = self.full_loop(scheduler=__lowerCAmelCase ) a = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3 a = DPMSolverSinglestepScheduler.from_config(scheduler.config ) a = DEISMultistepScheduler.from_config(scheduler.config ) a = DPMSolverMultistepScheduler.from_config(scheduler.config ) a = UniPCMultistepScheduler.from_config(scheduler.config ) a = self.full_loop(scheduler=__lowerCAmelCase ) a = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3 def A ( self : List[Any] ) -> Dict: """simple docstring""" for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=__lowerCAmelCase ) def A ( self : Optional[Any] ) -> Tuple: """simple docstring""" self.check_over_configs(thresholding=__lowerCAmelCase ) for order in [1, 2, 3]: for solver_type in ["bh1", "bh2"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=__lowerCAmelCase , prediction_type=__lowerCAmelCase , sample_max_value=__lowerCAmelCase , solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , ) def A ( self : Optional[Any] ) -> Any: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__lowerCAmelCase ) def A ( self : Optional[Any] ) -> Any: """simple docstring""" for solver_type in ["bh1", "bh2"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , prediction_type=__lowerCAmelCase , ) a = self.full_loop( solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , prediction_type=__lowerCAmelCase , ) assert not torch.isnan(__lowerCAmelCase ).any(), "Samples have nan numbers" def A ( self : Optional[int] ) -> Any: """simple docstring""" self.check_over_configs(lower_order_final=__lowerCAmelCase ) 
self.check_over_configs(lower_order_final=__lowerCAmelCase ) def A ( self : Dict ) -> str: """simple docstring""" for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=__lowerCAmelCase , time_step=0 ) def A ( self : Dict ) -> int: """simple docstring""" a = self.full_loop() a = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3 def A ( self : Optional[int] ) -> int: """simple docstring""" a = self.full_loop(prediction_type="v_prediction" ) a = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_mean.item() - 0.1_0_1_4 ) < 1E-3 def A ( self : Union[str, Any] ) -> str: """simple docstring""" a = self.scheduler_classes[0] a = self.get_scheduler_config(thresholding=__lowerCAmelCase , dynamic_thresholding_ratio=0 ) a = scheduler_class(**__lowerCAmelCase ) a = 10 a = self.dummy_model() a = self.dummy_sample_deter.half() scheduler.set_timesteps(__lowerCAmelCase ) for i, t in enumerate(scheduler.timesteps ): a = model(__lowerCAmelCase , __lowerCAmelCase ) a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample assert sample.dtype == torch.floataa def A ( self : List[str] , **__lowerCAmelCase : int ) -> Dict: """simple docstring""" for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config(**__lowerCAmelCase ) a = scheduler_class(**__lowerCAmelCase ) scheduler.set_timesteps(scheduler.config.num_train_timesteps ) assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
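For orientation, a hedged sketch of the denoising loop these tests exercise, following the standard diffusers scheduler API (the zero model output stands in for a trained UNet):

import torch
from diffusers import UniPCMultistepScheduler

scheduler = UniPCMultistepScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)  # 10 inference steps
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    model_output = torch.zeros_like(sample)  # placeholder for a UNet forward pass
    sample = scheduler.step(model_output, t, sample).prev_sample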
32
0
import logging

from transformers import PretrainedConfig


A_ : Tuple = logging.getLogger(__name__)

A_ : Dict = {
    '''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}


class _lowercase ( UpperCAmelCase__ ):
    _UpperCAmelCase = '''bertabs'''

    def __init__( self : Optional[int] , __lowerCAmelCase : int=3_0522 , __lowerCAmelCase : Dict=512 , __lowerCAmelCase : Dict=6 , __lowerCAmelCase : Optional[Any]=512 , __lowerCAmelCase : Union[str, Any]=8 , __lowerCAmelCase : Any=512 , __lowerCAmelCase : List[Any]=0.2 , __lowerCAmelCase : List[str]=6 , __lowerCAmelCase : Tuple=768 , __lowerCAmelCase : int=8 , __lowerCAmelCase : str=2048 , __lowerCAmelCase : Any=0.2 , **__lowerCAmelCase : Optional[int] , ) -> Optional[int]:
        """simple docstring"""
        super().__init__(**__lowerCAmelCase )
        a = vocab_size
        a = max_pos
        a = enc_layers
        a = enc_hidden_size
        a = enc_heads
        a = enc_ff_size
        a = enc_dropout
        a = dec_layers
        a = dec_hidden_size
        a = dec_heads
        a = dec_ff_size
        a = dec_dropout
712
import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _lowercase : def __init__( self : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int]=13 , __lowerCAmelCase : str=32 , __lowerCAmelCase : str=3 , __lowerCAmelCase : int=4 , __lowerCAmelCase : List[str]=[10, 20, 30, 40] , __lowerCAmelCase : Any=[2, 2, 3, 2] , __lowerCAmelCase : Any=True , __lowerCAmelCase : int=True , __lowerCAmelCase : str=37 , __lowerCAmelCase : List[Any]="gelu" , __lowerCAmelCase : int=10 , __lowerCAmelCase : str=0.0_2 , __lowerCAmelCase : int=["stage2", "stage3", "stage4"] , __lowerCAmelCase : List[str]=[2, 3, 4] , __lowerCAmelCase : str=None , ) -> Optional[Any]: """simple docstring""" a = parent a = batch_size a = image_size a = num_channels a = num_stages a = hidden_sizes a = depths a = is_training a = use_labels a = intermediate_size a = hidden_act a = num_labels a = initializer_range a = out_features a = out_indices a = scope def A ( self : Optional[Any] ) -> int: """simple docstring""" a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a = None if self.use_labels: a = ids_tensor([self.batch_size] , self.num_labels ) a = self.get_config() return config, pixel_values, labels def A ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" return ConvNextVaConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def A ( self : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict ) -> Optional[int]: """simple docstring""" a = ConvNextVaModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def A ( self : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] ) -> Dict: """simple docstring""" a = ConvNextVaForImageClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] ) -> int: """simple docstring""" a = 
ConvNextVaBackbone(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None a = None a = ConvNextVaBackbone(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def A ( self : Union[str, Any] ) -> Dict: """simple docstring""" a = self.prepare_config_and_inputs() a , a , a = config_and_inputs a = {"pixel_values": pixel_values} return config, inputs_dict def A ( self : Dict ) -> Optional[int]: """simple docstring""" a = self.prepare_config_and_inputs() a , a , a = config_and_inputs a = {"pixel_values": pixel_values, "labels": labels} return config, inputs_dict @require_torch class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ): _UpperCAmelCase = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) _UpperCAmelCase = ( {'''feature-extraction''': ConvNextVaModel, '''image-classification''': ConvNextVaForImageClassification} if is_torch_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False def A ( self : List[str] ) -> List[Any]: """simple docstring""" a = ConvNextVaModelTester(self ) a = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=37 ) def A ( self : Tuple ) -> Dict: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A ( self : List[Any] ) -> List[Any]: """simple docstring""" return @unittest.skip(reason="ConvNextV2 does not use inputs_embeds" ) def A ( self : List[Any] ) -> List[Any]: """simple docstring""" pass @unittest.skip(reason="ConvNextV2 does not support input and output embeddings" ) def A ( self : int ) -> List[Any]: """simple docstring""" pass @unittest.skip(reason="ConvNextV2 does not use feedforward chunking" ) def A ( self : Optional[int] ) -> Dict: """simple docstring""" pass def A ( self : List[str] ) -> List[str]: """simple docstring""" if not self.model_tester.is_training: return for model_class in self.all_model_classes: a , a = self.model_tester.prepare_config_and_inputs_with_labels() a = True if model_class.__name__ in [ *get_values(__lowerCAmelCase ), *get_values(__lowerCAmelCase ), ]: continue a = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() a = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , 
return_labels=__lowerCAmelCase ) a = model(**__lowerCAmelCase ).loss loss.backward() def A ( self : Optional[int] ) -> List[str]: """simple docstring""" if not self.model_tester.is_training: return for model_class in self.all_model_classes: a , a = self.model_tester.prepare_config_and_inputs_with_labels() a = False a = True if ( model_class.__name__ in [*get_values(__lowerCAmelCase ), *get_values(__lowerCAmelCase )] or not model_class.supports_gradient_checkpointing ): continue a = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.gradient_checkpointing_enable() model.train() a = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) a = model(**__lowerCAmelCase ).loss loss.backward() def A ( self : List[Any] ) -> Any: """simple docstring""" a , a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = model_class(__lowerCAmelCase ) a = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a = [*signature.parameters.keys()] a = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCAmelCase ) def A ( self : Dict ) -> Dict: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def A ( self : Tuple ) -> List[Any]: """simple docstring""" def check_hidden_states_output(__lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple ): a = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() with torch.no_grad(): a = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) ) a = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states a = self.model_tester.num_stages self.assertEqual(len(__lowerCAmelCase ) , expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) a , a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = True check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] a = True check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def A ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase ) @slow def A ( self : Tuple ) -> List[str]: """simple docstring""" for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a = ConvNextVaModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) def UpperCAmelCase__ ( ): '''simple docstring''' a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class _lowercase ( unittest.TestCase ): @cached_property def A ( self : Optional[int] ) -> str: """simple docstring""" return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None @slow def A ( self : List[str] ) -> Union[str, Any]: """simple docstring""" a = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(__lowerCAmelCase ) a = self.default_image_processor a = 
prepare_img() a = preprocessor(images=__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase ) # forward pass with torch.no_grad(): a = model(**__lowerCAmelCase ) # verify the logits a = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __lowerCAmelCase ) a = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1E-4 ) )
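The training tests above also exercise gradient checkpointing. A minimal torch-only sketch of that technique, which trades compute for memory by recomputing activations during backward (assumes a torch version with the use_reentrant flag, roughly 1.11+):

import torch
from torch.utils.checkpoint import checkpoint

layer = torch.nn.Linear(8, 8)
x = torch.randn(2, 8, requires_grad=True)
# Activations inside `checkpoint` are not stored; they are recomputed in backward.
y = checkpoint(layer, x, use_reentrant=False)
y.sum().backward()
print(x.grad.shape)  # torch.Size([2, 8])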
32
0
'''simple docstring'''
from math import isclose, sqrt


def UpperCAmelCase__ ( UpperCAmelCase__ :float , UpperCAmelCase__ :float , UpperCAmelCase__ :float ):
    '''simple docstring'''
    a = point_y / 4 / point_x
    a = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    a = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    a = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    a = outgoing_gradient**2 + 4
    a = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    a = (point_y - outgoing_gradient * point_x) ** 2 - 1_00

    a = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    a = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    a = x_minus if isclose(UpperCAmelCase__ , UpperCAmelCase__ ) else x_plus
    a = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def UpperCAmelCase__ ( UpperCAmelCase__ :float = 1.4 , UpperCAmelCase__ :float = -9.6 ):
    '''simple docstring'''
    a = 0
    a = first_x_coord
    a = first_y_coord
    a = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        a , a , a = next_point(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
        num_reflections += 1

    return num_reflections


if __name__ == "__main__":
    print(F"""{solution() = }""")
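A quick sanity check of the geometry above: the beam's default entry point (1.4, -9.6) must lie on the elliptical mirror y**2 + 4*x**2 = 100.

x, y = 1.4, -9.6
print(y**2 + 4 * x**2)  # 100.0, up to float rounding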
713
import copy import os import cva import numpy as np from matplotlib import pyplot as plt class _lowercase : def __init__( self : List[str] ) -> List[str]: """simple docstring""" a = "" a = "" a = [] a = 0 a = 256 a = 0 a = 0 a = 0 a = 0 def A ( self : Optional[Any] , __lowerCAmelCase : Any ) -> int: """simple docstring""" a = cva.imread(__lowerCAmelCase , 0 ) a = copy.deepcopy(self.img ) a , a , a = plt.hist(self.img.ravel() , 256 , [0, 256] , label="x" ) a = np.sum(__lowerCAmelCase ) for i in range(len(__lowerCAmelCase ) ): a = x[i] / self.k self.sk += prk a = (self.L - 1) * self.sk if self.rem != 0: a = int(last % last ) a = int(last + 1 if self.rem >= 0.5 else last ) self.last_list.append(__lowerCAmelCase ) a = int(np.ma.count(self.img ) / self.img[1].size ) a = self.img[1].size for i in range(self.number_of_cols ): for j in range(self.number_of_rows ): a = self.img[j][i] if num != self.last_list[num]: a = self.last_list[num] cva.imwrite("output_data/output.jpg" , self.img ) def A ( self : Any ) -> int: """simple docstring""" plt.hist(self.img.ravel() , 256 , [0, 256] ) def A ( self : Any ) -> int: """simple docstring""" cva.imshow("Output-Image" , self.img ) cva.imshow("Input-Image" , self.original_image ) cva.waitKey(5000 ) cva.destroyAllWindows() if __name__ == "__main__": A_ : List[Any] = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''') A_ : int = ConstantStretch() stretcher.stretch(file_path) stretcher.plot_histogram() stretcher.show_image()
32
0
from manim import * class _lowercase ( UpperCAmelCase__ ): def A ( self : int ) -> List[Any]: """simple docstring""" a = Rectangle(height=0.5 , width=0.5 ) a = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 ) a = Rectangle(height=0.2_5 , width=0.2_5 ) a = [mem.copy() for i in range(6 )] a = [mem.copy() for i in range(6 )] a = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 ) a = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 ) a = VGroup(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 ) a = Text("CPU" , font_size=24 ) a = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(__lowerCAmelCase ) a = [mem.copy() for i in range(4 )] a = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 ) a = Text("GPU" , font_size=24 ) a = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase ) gpu.move_to([-1, -1, 0] ) self.add(__lowerCAmelCase ) a = [mem.copy() for i in range(6 )] a = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 ) a = Text("Model" , font_size=24 ) a = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase ) model.move_to([3, -1.0, 0] ) self.add(__lowerCAmelCase ) a = [] a = [] for i, rect in enumerate(__lowerCAmelCase ): a = fill.copy().set_fill(__lowerCAmelCase , opacity=0.8 ) target.move_to(__lowerCAmelCase ) model_arr.append(__lowerCAmelCase ) a = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(__lowerCAmelCase , opacity=0.8 ) cpu_target.move_to(cpu_left_col_base[i] ) model_cpu_arr.append(__lowerCAmelCase ) self.add(*__lowerCAmelCase , *__lowerCAmelCase ) a = [meta_mem.copy() for i in range(6 )] a = [meta_mem.copy() for i in range(6 )] a = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 ) a = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 ) a = VGroup(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 ) a = Text("Disk" , font_size=24 ) a = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase ) disk.move_to([-4, -1.2_5, 0] ) self.add(__lowerCAmelCase , __lowerCAmelCase ) a = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) a = MarkupText( f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(__lowerCAmelCase , __lowerCAmelCase ) a = MarkupText( f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , ) blue_text.next_to(__lowerCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(__lowerCAmelCase ) a = MarkupText( f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(__lowerCAmelCase ) ) a = Square(0.3 ) input.set_fill(__lowerCAmelCase , opacity=1.0 ) input.set_stroke(width=0.0 ) input.next_to(model_base[0] , __lowerCAmelCase , buff=0.5 ) self.play(Write(__lowerCAmelCase ) ) input.generate_target() input.target.next_to(model_arr[0] , direction=__lowerCAmelCase , buff=0.0_2 ) self.play(MoveToTarget(__lowerCAmelCase ) ) self.play(FadeOut(__lowerCAmelCase ) ) a = Arrow(start=__lowerCAmelCase , end=__lowerCAmelCase , color=__lowerCAmelCase , buff=0.5 ) a.next_to(model_arr[0].get_left() , __lowerCAmelCase , buff=0.2 ) model_cpu_arr[0].generate_target() 
model_cpu_arr[0].target.move_to(gpu_rect[0] ) a = MarkupText( f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(__lowerCAmelCase , run_time=3 ) ) a = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.0_2} self.play( Write(__lowerCAmelCase ) , Circumscribe(model_arr[0] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(gpu_rect[0] , color=__lowerCAmelCase , **__lowerCAmelCase ) , ) self.play(MoveToTarget(model_cpu_arr[0] ) ) a = a.copy() for i in range(6 ): a_c.next_to(model_arr[i].get_right() + 0.0_2 , __lowerCAmelCase , buff=0.2 ) input.generate_target() input.target.move_to(model_arr[i].get_right() + 0.0_2 ) a = AnimationGroup( FadeOut(__lowerCAmelCase , run_time=0.5 ) , MoveToTarget(__lowerCAmelCase , run_time=0.5 ) , FadeIn(__lowerCAmelCase , run_time=0.5 ) , lag_ratio=0.2 ) self.play(__lowerCAmelCase ) model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[i] ) if i < 5: model_cpu_arr[i + 1].generate_target() model_cpu_arr[i + 1].target.move_to(gpu_rect[0] ) if i >= 1: a = 0.7 self.play( Circumscribe(model_arr[i] , **__lowerCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **__lowerCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(gpu_rect[0] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(model_arr[i + 1] , color=__lowerCAmelCase , **__lowerCAmelCase ) , ) if i < 1: self.play( MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , ) else: self.play( MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , ) else: model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] ) input.generate_target() input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.0_2 , buff=0.2 ) self.play( Circumscribe(model_arr[-1] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(gpu_rect[0] , color=__lowerCAmelCase , **__lowerCAmelCase ) , ) self.play(MoveToTarget(model_cpu_arr[i] ) ) a = a_c a = a_c.copy() input.generate_target() input.target.next_to(model_base[-1] , RIGHT + 0.0_2 , buff=0.5 ) self.play( FadeOut(__lowerCAmelCase ) , FadeOut(__lowerCAmelCase , run_time=0.5 ) , ) a = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(__lowerCAmelCase , run_time=3 ) , MoveToTarget(__lowerCAmelCase ) ) self.wait()
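For reference, a Scene subclass like the one above is rendered from the command line with manim's CLI; the module file name below is hypothetical:

# preview (-p) at low quality (-ql) for fast iteration
#   manim -pql big_model_inference.py _lowercase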
714
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class _lowercase ( UpperCAmelCase__ ):
    _UpperCAmelCase = 42
    _UpperCAmelCase = 42

    def __init__( self : Optional[Any] , __lowerCAmelCase : UNetaDModel , __lowerCAmelCase : ScoreSdeVeScheduler ) -> str:
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase )

    @torch.no_grad()
    def __call__( self : int , __lowerCAmelCase : int = 1 , __lowerCAmelCase : int = 2000 , __lowerCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowerCAmelCase : Optional[str] = "pil" , __lowerCAmelCase : bool = True , **__lowerCAmelCase : Any , ) -> Union[ImagePipelineOutput, Tuple]:
        """simple docstring"""
        a = self.unet.config.sample_size
        a = (batch_size, 3, img_size, img_size)
        a = self.unet

        a = randn_tensor(__lowerCAmelCase , generator=__lowerCAmelCase ) * self.scheduler.init_noise_sigma
        a = sample.to(self.device )

        self.scheduler.set_timesteps(__lowerCAmelCase )
        self.scheduler.set_sigmas(__lowerCAmelCase )

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            a = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )

            # correction step
            for _ in range(self.scheduler.config.correct_steps ):
                a = self.unet(__lowerCAmelCase , __lowerCAmelCase ).sample
                a = self.scheduler.step_correct(__lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase ).prev_sample

            # prediction step
            a = model(__lowerCAmelCase , __lowerCAmelCase ).sample
            a = self.scheduler.step_pred(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase )

            a , a = output.prev_sample, output.prev_sample_mean

        a = sample_mean.clamp(0 , 1 )
        a = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            a = self.numpy_to_pil(__lowerCAmelCase )

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=__lowerCAmelCase )
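A hedged usage sketch for this pipeline, following the public diffusers interface for ScoreSdeVePipeline; the checkpoint id is the one used in the diffusers docs, and running this downloads real weights:

from diffusers import DiffusionPipeline

sde_ve = DiffusionPipeline.from_pretrained("google/ncsnpp-celebahq-256")
image = sde_ve(num_inference_steps=2000).images[0]
image.save("sde_ve_generated_image.png")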
32
0
from typing import List, Optional

from tokenizers import ByteLevelBPETokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer


A_ : List[Any] = logging.get_logger(__name__)

A_ : List[Any] = {
    '''vocab_file''': '''vocab.json''',
    '''merges_file''': '''merges.txt''',
    '''tokenizer_config_file''': '''tokenizer_config.json''',
}

A_ : List[str] = {
    '''vocab_file''': {
        '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
    },
    '''merges_file''': {
        '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
    },
    '''tokenizer_config_file''': {
        '''facebook/blenderbot_small-90M''': (
            '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
        )
    },
}

A_ : List[str] = {
    '''facebook/blenderbot_small-90M''': 5_12,
}


class _lowercase ( UpperCAmelCase__ ):
    _UpperCAmelCase = VOCAB_FILES_NAMES
    _UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
    _UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _UpperCAmelCase = BlenderbotSmallTokenizer

    def __init__( self : List[Any] , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : Tuple=None , __lowerCAmelCase : Any="<|endoftext|>" , __lowerCAmelCase : Tuple="<|endoftext|>" , __lowerCAmelCase : str="<|endoftext|>" , __lowerCAmelCase : Any=False , __lowerCAmelCase : Union[str, Any]=True , **__lowerCAmelCase : int , ) -> str:
        """simple docstring"""
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=__lowerCAmelCase , merges=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase , ) , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , **__lowerCAmelCase , )
        a = add_prefix_space

    def A ( self : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str]=None ) -> List[Any]:
        """simple docstring"""
        a = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]

    def A ( self : int , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        a = [self.sep_token_id]
        a = [self.cls_token_id]

        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
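A minimal sketch of the token_type_ids rule the last method above implements: Blenderbot Small builds BERT-shaped segments but fills both with zeros (the special-token ids here are placeholders):

cls, sep = [0], [2]          # placeholder special-token ids
seq_a, seq_b = [5, 6], [7]
print(len(cls + seq_a + sep) * [0])                      # [0, 0, 0, 0]
print(len(cls + seq_a + sep + sep + seq_b + sep) * [0])  # [0, 0, 0, 0, 0, 0, 0]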
715
A_ : Any = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
A_ : Tuple = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
A_ : Optional[int] = {
    0: '''Sunday''',
    1: '''Monday''',
    2: '''Tuesday''',
    3: '''Wednesday''',
    4: '''Thursday''',
    5: '''Friday''',
    6: '''Saturday''',
}


def UpperCAmelCase__ ( UpperCAmelCase__ :int , UpperCAmelCase__ :int , UpperCAmelCase__ :int ):
    '''simple docstring'''
    assert len(str(UpperCAmelCase__ ) ) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"

    # Doomsday algorithm:
    a = year // 1_00
    a = (5 * (century % 4) + 2) % 7
    a = year % 1_00
    a = centurian % 12
    a = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    a = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 4_00) == 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    a = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
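Because the constant names above are obfuscated, a safe way to validate a doomsday-style weekday function is to cross-check it against the standard library. A self-contained reference using datetime (the dates are illustrative):

from datetime import date

def reference_week_day(year: int, month: int, day: int) -> str:
    # datetime already knows the Gregorian calendar; use it as ground truth.
    return date(year, month, day).strftime("%A")

for y, m, d in [(2000, 1, 1), (1776, 7, 4), (2012, 5, 12)]:
    print(y, m, d, reference_week_day(y, m, d))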
32
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


A_ : Tuple = {
    '''configuration_clipseg''': [
        '''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''CLIPSegConfig''',
        '''CLIPSegTextConfig''',
        '''CLIPSegVisionConfig''',
    ],
    '''processing_clipseg''': ['''CLIPSegProcessor'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A_ : Dict = [
        '''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''CLIPSegModel''',
        '''CLIPSegPreTrainedModel''',
        '''CLIPSegTextModel''',
        '''CLIPSegVisionModel''',
        '''CLIPSegForImageSegmentation''',
    ]

if TYPE_CHECKING:
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )

else:
    import sys

    A_ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
716
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple

import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask

import transformers
from transformers import (
    AutoConfig,
    AutoModelForTokenClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process


A_ : int = logging.getLogger(__name__)


@dataclass
class _lowercase :
    _UpperCAmelCase = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    _UpperCAmelCase = field(
        default=UpperCAmelCase__, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    _UpperCAmelCase = field(
        default='''NER''', metadata={'''help''': '''Task type to fine tune in training (e.g. NER, POS, etc)'''} )
    _UpperCAmelCase = field(
        default=UpperCAmelCase__, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    _UpperCAmelCase = field(default=UpperCAmelCase__, metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    _UpperCAmelCase = field(
        default=UpperCAmelCase__, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, )


@dataclass
class _lowercase :
    _UpperCAmelCase = field(
        metadata={'''help''': '''The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'''} )
    _UpperCAmelCase = field(
        default=UpperCAmelCase__, metadata={'''help''': '''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'''}, )
    _UpperCAmelCase = field(
        default=128, metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        }, )
    _UpperCAmelCase = field(
        default=UpperCAmelCase__, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )


def UpperCAmelCase__ ( ):
    '''simple docstring'''
    a = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        a , a , a = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        a , a , a = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
            " --overwrite_output_dir to overcome." )

    a = import_module("tasks" )
    try:
        a = getattr(UpperCAmelCase__ , model_args.task_type )
        a = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
            f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s" , UpperCAmelCase__ )

    # Set seed
    set_seed(training_args.seed )

    # Prepare CONLL-2003 task
    a = token_classification_task.get_labels(data_args.labels )
    a = dict(enumerate(UpperCAmelCase__ ) )
    a = len(UpperCAmelCase__ )

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    a = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCAmelCase__ , idalabel=UpperCAmelCase__ , labelaid={label: i for i, label in enumerate(UpperCAmelCase__ )} , cache_dir=model_args.cache_dir , )
    a = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
    a = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=UpperCAmelCase__ , cache_dir=model_args.cache_dir , )

    # Get datasets
    a = (
        TokenClassificationDataset(
            token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    a = (
        TokenClassificationDataset(
            token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )

    def align_predictions(UpperCAmelCase__ :np.ndarray , UpperCAmelCase__ :np.ndarray ) -> Tuple[List[int], List[int]]:
        a = np.argmax(UpperCAmelCase__ , axis=2 )
        a , a = preds.shape
        a = [[] for _ in range(UpperCAmelCase__ )]
        a = [[] for _ in range(UpperCAmelCase__ )]
        for i in range(UpperCAmelCase__ ):
            for j in range(UpperCAmelCase__ ):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        return preds_list, out_label_list

    def compute_metrics(UpperCAmelCase__ :EvalPrediction ) -> Dict:
        a , a = align_predictions(p.predictions , p.label_ids )
        return {
            "accuracy_score": accuracy_score(UpperCAmelCase__ , UpperCAmelCase__ ),
            "precision": precision_score(UpperCAmelCase__ , UpperCAmelCase__ ),
            "recall": recall_score(UpperCAmelCase__ , UpperCAmelCase__ ),
            "f1": fa_score(UpperCAmelCase__ , UpperCAmelCase__ ),
        }

    # Data collator
    a = DataCollatorWithPadding(UpperCAmelCase__ , pad_to_multiple_of=8 ) if training_args.fpaa else None

    # Initialize our Trainer
    a = Trainer(
        model=UpperCAmelCase__ , args=UpperCAmelCase__ , train_dataset=UpperCAmelCase__ , eval_dataset=UpperCAmelCase__ , compute_metrics=UpperCAmelCase__ , data_collator=UpperCAmelCase__ , )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir )

    # Evaluation
    a = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***" )

        a = trainer.evaluate()

        a = os.path.join(training_args.output_dir , "eval_results.txt" )
        if trainer.is_world_process_zero():
            with open(UpperCAmelCase__ , "w" ) as writer:
                logger.info("***** Eval results *****" )
                for key, value in result.items():
                    logger.info(" %s = %s" , UpperCAmelCase__ , UpperCAmelCase__ )
                    writer.write("%s = %s\n" % (key, value) )

            results.update(UpperCAmelCase__ )

    # Predict
    if training_args.do_predict:
        a = TokenClassificationDataset(
            token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )

        a , a , a = trainer.predict(UpperCAmelCase__ )
        a , a = align_predictions(UpperCAmelCase__ , UpperCAmelCase__ )

        a = os.path.join(training_args.output_dir , "test_results.txt" )
        if trainer.is_world_process_zero():
            with open(UpperCAmelCase__ , "w" ) as writer:
                for key, value in metrics.items():
                    logger.info(" %s = %s" , UpperCAmelCase__ , UpperCAmelCase__ )
                    writer.write("%s = %s\n" % (key, value) )

        # Save predictions
        a = os.path.join(training_args.output_dir , "test_predictions.txt" )
        if trainer.is_world_process_zero():
            with open(UpperCAmelCase__ , "w" ) as writer:
                with open(os.path.join(data_args.data_dir , "test.txt" ) , "r" ) as f:
                    token_classification_task.write_predictions_to_file(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )

    return results


def UpperCAmelCase__ ( UpperCAmelCase__ :Tuple ):
    '''simple docstring'''
    main()


if __name__ == "__main__":
    main()
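# A hedged, standalone restatement of the align_predictions logic above: -100
# is nn.CrossEntropyLoss().ignore_index, and the label map and logits below
# are invented for illustration.
import numpy as np

ignore_index = -100
label_map = {0: "O", 1: "B-PER"}
predictions = np.array([[[0.9, 0.1], [0.2, 0.8], [0.5, 0.5]]])  # (batch, seq, num_labels)
label_ids = np.array([[0, 1, ignore_index]])  # last position is padding

preds = np.argmax(predictions, axis=2)
preds_list = [
    [label_map[p] for p, l in zip(pred_row, label_row) if l != ignore_index]
    for pred_row, label_row in zip(preds, label_ids)
]
print(preds_list)  # [['O', 'B-PER']]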
32
0
import socket


def UpperCAmelCase__ ( ):
    a = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
    a = socket.gethostname()
    a = 1_23_12

    sock.connect((host, port) )
    sock.send(b"Hello server!" )

    with open("Received_file" , "wb" ) as out_file:
        print("File opened" )
        print("Receiving data..." )
        while True:
            a = sock.recv(10_24 )
            if not data:
                break
            out_file.write(UpperCAmelCase__ )

    print("Successfully received the file" )
    sock.close()
    print("Connection closed" )


if __name__ == "__main__":
    main()
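# A hedged counterpart for the receiver above: a minimal one-shot sender that
# accepts the client, reads its greeting, and streams back some bytes. The
# host, port and payload are assumptions chosen to match the client sketch.
import socket


def send_file() -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), 12312))
    server.listen(1)
    conn, _addr = server.accept()
    conn.recv(1024)  # consume the client's "Hello server!" greeting
    conn.sendall(b"example payload")  # stand-in for real file contents
    conn.close()
    server.close()


if __name__ == "__main__":
    send_file()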
717
from ...configuration_utils import PretrainedConfig
from ...utils import logging


A_ : str = logging.get_logger(__name__)

A_ : List[Any] = {
    '''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
    '''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
    '''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
    '''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}


class _lowercase ( UpperCAmelCase__ ):
    _UpperCAmelCase = '''rwkv'''
    _UpperCAmelCase = {'''max_position_embeddings''': '''context_length'''}

    def __init__( self : List[str] , __lowerCAmelCase : Union[str, Any]=5_0277 , __lowerCAmelCase : str=1024 , __lowerCAmelCase : Union[str, Any]=4096 , __lowerCAmelCase : Optional[int]=32 , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : List[Any]=1E-5 , __lowerCAmelCase : Union[str, Any]=0 , __lowerCAmelCase : Optional[Any]=0 , __lowerCAmelCase : Dict=6 , __lowerCAmelCase : int=False , __lowerCAmelCase : Tuple=True , **__lowerCAmelCase : List[str] , ) -> List[Any]:
        """simple docstring"""
        a = vocab_size
        a = context_length
        a = hidden_size
        a = num_hidden_layers
        a = attention_hidden_size if attention_hidden_size is not None else hidden_size
        a = intermediate_size if intermediate_size is not None else 4 * hidden_size
        a = layer_norm_epsilon
        a = rescale_every
        a = use_cache

        a = bos_token_id
        a = eos_token_id

        super().__init__(
            tie_word_embeddings=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
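# A hedged restatement of the default-fallback pattern in the constructor
# above, isolated into a plain function; the name resolve_sizes is invented.
def resolve_sizes(hidden_size: int, attention_hidden_size=None, intermediate_size=None):
    # attention_hidden_size defaults to hidden_size; intermediate_size to 4x it.
    attn = attention_hidden_size if attention_hidden_size is not None else hidden_size
    inter = intermediate_size if intermediate_size is not None else 4 * hidden_size
    return attn, inter


assert resolve_sizes(1024) == (1024, 4096)
assert resolve_sizes(1024, attention_hidden_size=512) == (512, 4096)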
32
0
import unittest

from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        LiltForQuestionAnswering,
        LiltForSequenceClassification,
        LiltForTokenClassification,
        LiltModel,
    )
    from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST


class _lowercase :
    def __init__( self : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any]=13 , __lowerCAmelCase : Optional[Any]=7 , __lowerCAmelCase : Any=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Any=99 , __lowerCAmelCase : Optional[Any]=24 , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Dict=6 , __lowerCAmelCase : int=37 , __lowerCAmelCase : Any="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Optional[Any]=512 , __lowerCAmelCase : Any=16 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Optional[int]=0.0_2 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : str=1000 , ) -> List[str]:
        """simple docstring"""
        a = parent
        a = batch_size
        a = seq_length
        a = is_training
        a = use_input_mask
        a = use_token_type_ids
        a = use_labels
        a = vocab_size
        a = hidden_size
        a = num_hidden_layers
        a = num_attention_heads
        a = intermediate_size
        a = hidden_act
        a = hidden_dropout_prob
        a = attention_probs_dropout_prob
        a = max_position_embeddings
        a = type_vocab_size
        a = type_sequence_label_size
        a = initializer_range
        a = num_labels
        a = scope
        a = range_bbox

    def A ( self : Optional[Any] ) -> int:
        """simple docstring"""
        a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        a = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    a = bbox[i, j, 3]
                    a = bbox[i, j, 1]
                    a = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    a = bbox[i, j, 2]
                    a = bbox[i, j, 0]
                    a = t

        a = None
        if self.use_input_mask:
            a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )

        a = None
        if self.use_token_type_ids:
            a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        a = None
        a = None
        if self.use_labels:
            a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )

        a = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def A ( self : Tuple ) -> List[Any]:
        """simple docstring"""
        return LiltConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )

    def A ( self : Dict , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , ) -> Optional[Any]:
        """simple docstring"""
        a = LiltModel(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        a = model(__lowerCAmelCase , bbox=__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
        a = model(__lowerCAmelCase , bbox=__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
        a = model(__lowerCAmelCase , bbox=__lowerCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def A ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , ) -> Optional[int]:
        """simple docstring"""
        a = self.num_labels
        a = LiltForTokenClassification(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        a = model(
            __lowerCAmelCase , bbox=__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def A ( self : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , ) -> Optional[int]:
        """simple docstring"""
        a = LiltForQuestionAnswering(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        a = model(
            __lowerCAmelCase , bbox=__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def A ( self : Any ) -> Any:
        """simple docstring"""
        a = self.prepare_config_and_inputs()
        ( ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ) = config_and_inputs
        a = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
    _UpperCAmelCase = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    _UpperCAmelCase = (
        {
            '''feature-extraction''': LiltModel,
            '''question-answering''': LiltForQuestionAnswering,
            '''text-classification''': LiltForSequenceClassification,
            '''token-classification''': LiltForTokenClassification,
            '''zero-shot''': LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    _UpperCAmelCase = False
    _UpperCAmelCase = False

    def A ( self : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int ) -> int:
        """simple docstring"""
        return True

    def A ( self : int ) -> int:
        """simple docstring"""
        a = LiltModelTester(self )
        a = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 )

    def A ( self : str ) -> int:
        """simple docstring"""
        self.config_tester.run_common_tests()

    def A ( self : Dict ) -> Union[str, Any]:
        """simple docstring"""
        a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowerCAmelCase )

    def A ( self : List[Any] ) -> Any:
        """simple docstring"""
        a = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            a = type
            self.model_tester.create_and_check_model(*__lowerCAmelCase )

    def A ( self : List[Any] ) -> str:
        """simple docstring"""
        a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase )

    def A ( self : Tuple ) -> List[Any]:
        """simple docstring"""
        a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase )

    @slow
    def A ( self : Union[str, Any] ) -> Optional[int]:
        """simple docstring"""
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a = LiltModel.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )


@require_torch
@slow
class _lowercase ( unittest.TestCase ):
    def A ( self : int ) -> int:
        """simple docstring"""
        a = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base" ).to(__lowerCAmelCase )

        a = torch.tensor([[1, 2]] , device=__lowerCAmelCase )
        a = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__lowerCAmelCase )

        # forward pass
        with torch.no_grad():
            a = model(input_ids=__lowerCAmelCase , bbox=__lowerCAmelCase )

        a = torch.Size([1, 2, 768] )
        a = torch.tensor(
            [[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=__lowerCAmelCase , )

        self.assertTrue(outputs.last_hidden_state.shape , __lowerCAmelCase )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __lowerCAmelCase , atol=1E-3 ) )
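# A hedged, vectorized restatement of the bbox "legalization" loop in the
# tester above: coordinates are (x0, y0, x1, y1) and out-of-order pairs are
# swapped so that x0 <= x1 and y0 <= y1. The sample box is made up.
import torch

bbox = torch.tensor([[[5, 9, 2, 4]]])  # x0 > x1 and y0 > y1 on purpose
xa, ya, xb, yb = bbox.unbind(-1)
legal = torch.stack(
    [torch.minimum(xa, xb), torch.minimum(ya, yb), torch.maximum(xa, xb), torch.maximum(ya, yb)], dim=-1
)
print(legal)  # tensor([[[2, 4, 5, 9]]])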
718
from math import ceil
from typing import List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging


A_ : List[str] = logging.get_logger(__name__)


class _lowercase ( UpperCAmelCase__ ):
    _UpperCAmelCase = ['''audio_values''', '''audio_mask''']

    def __init__( self : List[Any] , __lowerCAmelCase : Dict=2048 , __lowerCAmelCase : List[Any]=1 , __lowerCAmelCase : Dict=[16, 16] , __lowerCAmelCase : str=128 , __lowerCAmelCase : Optional[int]=4_4100 , __lowerCAmelCase : int=86 , __lowerCAmelCase : Optional[Any]=2048 , __lowerCAmelCase : str=0.0 , **__lowerCAmelCase : Optional[int] , ) -> Union[str, Any]:
        """simple docstring"""
        super().__init__(
            feature_size=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , padding_value=__lowerCAmelCase , **__lowerCAmelCase , )
        a = spectrogram_length
        a = num_channels
        a = patch_size
        a = feature_size // self.patch_size[1]
        a = n_fft
        a = sampling_rate // hop_length_to_sampling_rate
        a = sampling_rate
        a = padding_value
        a = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__lowerCAmelCase , min_frequency=0.0 , max_frequency=2_2_0_5_0.0 , sampling_rate=__lowerCAmelCase , norm="slaney" , mel_scale="slaney" , ).T

    def A ( self : List[str] , __lowerCAmelCase : np.array ) -> np.ndarray:
        """simple docstring"""
        a = spectrogram(
            __lowerCAmelCase , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="dB" , db_range=8_0.0 , )
        a = log_spec[:, :-1]
        a = log_spec - 2_0.0
        a = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0
        return log_spec

    def __call__( self : Union[str, Any] , __lowerCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , __lowerCAmelCase : Optional[bool] = True , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , **__lowerCAmelCase : Optional[int] , ) -> BatchFeature:
        """simple docstring"""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
                    f""" with {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )

        a = isinstance(__lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
        a = is_batched_numpy or (
            isinstance(__lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            a = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
        elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray ):
            a = np.asarray(__lowerCAmelCase , dtype=np.floataa )
        elif isinstance(__lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            a = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            a = [np.asarray([raw_speech] ).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        a = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , __lowerCAmelCase ):
            a = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for feature in audio_features]

        # Create audio attention mask
        a = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            a = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            a = np.array(__lowerCAmelCase ).astype(np.floataa )

        # convert into correct format for padding
        a = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        a = np.ones([len(__lowerCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
        a = padded_audio_features * self.padding_value
        for i in range(len(__lowerCAmelCase ) ):
            a = audio_features[i]
            a = feature

        # return as BatchFeature
        if return_attention_mask:
            a = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            a = {"audio_values": padded_audio_features}

        a = BatchFeature(data=__lowerCAmelCase , tensor_type=__lowerCAmelCase )

        return encoded_inputs
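# A hedged, standalone view of the log-mel normalization above: subtract 20 dB,
# divide by 40, clip to [-2, 0], then shift by +1, mapping values into [-1, 1].
# The input values are invented.
import numpy as np

log_spec = np.array([[-100.0, -20.0, 20.0]])  # dB-style values
normed = np.clip((log_spec - 20.0) / 40.0, -2.0, 0.0) + 1.0
assert normed.min() >= -1.0 and normed.max() <= 1.0
print(normed)  # [[-1.  0.  1.]]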
32
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


A_ : List[Any] = {
    '''configuration_distilbert''': [
        '''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''DistilBertConfig''',
        '''DistilBertOnnxConfig''',
    ],
    '''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A_ : Optional[Any] = ['''DistilBertTokenizerFast''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A_ : str = [
        '''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''DistilBertForMaskedLM''',
        '''DistilBertForMultipleChoice''',
        '''DistilBertForQuestionAnswering''',
        '''DistilBertForSequenceClassification''',
        '''DistilBertForTokenClassification''',
        '''DistilBertModel''',
        '''DistilBertPreTrainedModel''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A_ : Tuple = [
        '''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFDistilBertForMaskedLM''',
        '''TFDistilBertForMultipleChoice''',
        '''TFDistilBertForQuestionAnswering''',
        '''TFDistilBertForSequenceClassification''',
        '''TFDistilBertForTokenClassification''',
        '''TFDistilBertMainLayer''',
        '''TFDistilBertModel''',
        '''TFDistilBertPreTrainedModel''',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A_ : Tuple = [
        '''FlaxDistilBertForMaskedLM''',
        '''FlaxDistilBertForMultipleChoice''',
        '''FlaxDistilBertForQuestionAnswering''',
        '''FlaxDistilBertForSequenceClassification''',
        '''FlaxDistilBertForTokenClassification''',
        '''FlaxDistilBertModel''',
        '''FlaxDistilBertPreTrainedModel''',
    ]


if TYPE_CHECKING:
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )

else:
    import sys

    A_ : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
719
import inspect
import unittest

import numpy as np

from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel

if is_vision_available():
    from transformers import MaskFormerImageProcessor

if is_vision_available():
    from PIL import Image


class _lowercase :
    def __init__( self : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : int=10 , __lowerCAmelCase : Any=3 , __lowerCAmelCase : Optional[int]=32 * 4 , __lowerCAmelCase : Dict=32 * 6 , __lowerCAmelCase : str=4 , __lowerCAmelCase : Dict=32 , ) -> Any:
        """simple docstring"""
        a = parent
        a = batch_size
        a = is_training
        a = use_auxiliary_loss
        a = num_queries
        a = num_channels
        a = min_size
        a = max_size
        a = num_labels
        a = mask_feature_size

    def A ( self : Union[str, Any] ) -> Dict:
        """simple docstring"""
        a = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            __lowerCAmelCase )

        a = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowerCAmelCase )

        a = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowerCAmelCase ) > 0.5
        ).float()
        a = (torch.rand((self.batch_size, self.num_labels) , device=__lowerCAmelCase ) > 0.5).long()

        a = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def A ( self : str ) -> Any:
        """simple docstring"""
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
                decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )

    def A ( self : Union[str, Any] ) -> Any:
        """simple docstring"""
        a , a , a , a , a = self.prepare_config_and_inputs()
        a = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def A ( self : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Dict ) -> str:
        """simple docstring"""
        a = output.encoder_hidden_states
        a = output.pixel_decoder_hidden_states
        a = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(__lowerCAmelCase ) , config.decoder_config.decoder_layers )

    def A ( self : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str]=False ) -> Tuple:
        """simple docstring"""
        with torch.no_grad():
            a = MaskFormerModel(config=__lowerCAmelCase )
            model.to(__lowerCAmelCase )
            model.eval()

            a = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase )
            a = model(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness
        # of the encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden states exist
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )

        if output_hidden_states:
            self.check_output_hidden_state(__lowerCAmelCase , __lowerCAmelCase )

    def A ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] ) -> Optional[int]:
        """simple docstring"""
        a = MaskFormerForInstanceSegmentation(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()

        def comm_check_on_output(__lowerCAmelCase : Tuple ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )

        with torch.no_grad():
            a = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase )
            a = model(__lowerCAmelCase )

            comm_check_on_output(__lowerCAmelCase )

            a = model(
                pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase )

        comm_check_on_output(__lowerCAmelCase )

        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )


@require_torch
class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
    _UpperCAmelCase = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    _UpperCAmelCase = (
        {'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    _UpperCAmelCase = False
    _UpperCAmelCase = False
    _UpperCAmelCase = False
    _UpperCAmelCase = False

    def A ( self : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int ) -> int:
        """simple docstring"""
        return True

    def A ( self : int ) -> int:
        """simple docstring"""
        a = MaskFormerModelTester(self )
        a = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase )

    def A ( self : Any ) -> List[str]:
        """simple docstring"""
        self.config_tester.run_common_tests()

    def A ( self : Optional[Any] ) -> Optional[int]:
        """simple docstring"""
        a , a = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )

    def A ( self : int ) -> int:
        """simple docstring"""
        a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowerCAmelCase )

    @unittest.skip(reason="MaskFormer does not use inputs_embeds" )
    def A ( self : List[Any] ) -> Optional[Any]:
        """simple docstring"""
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" )
    def A ( self : str ) -> Union[str, Any]:
        """simple docstring"""
        pass

    @unittest.skip(reason="MaskFormer is not a generative model" )
    def A ( self : Tuple ) -> Optional[Any]:
        """simple docstring"""
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings" )
    def A ( self : Tuple ) -> Optional[Any]:
        """simple docstring"""
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
    def A ( self : Optional[int] ) -> List[str]:
        """simple docstring"""
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def A ( self : List[str] ) -> Any:
        """simple docstring"""
        pass

    def A ( self : Optional[Any] ) -> Optional[Any]:
        """simple docstring"""
        a , a = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            a = model_class(__lowerCAmelCase )
            a = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            a = [*signature.parameters.keys()]

            a = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , __lowerCAmelCase )

    @slow
    def A ( self : Tuple ) -> List[Any]:
        """simple docstring"""
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            a = MaskFormerModel.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )

    def A ( self : str ) -> Dict:
        """simple docstring"""
        a = (self.model_tester.min_size,) * 2
        a = {
            "pixel_values": torch.randn((2, 3, *size) , device=__lowerCAmelCase ),
            "mask_labels": torch.randn((2, 10, *size) , device=__lowerCAmelCase ),
            "class_labels": torch.zeros(2 , 10 , device=__lowerCAmelCase ).long(),
        }

        a = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowerCAmelCase )
        a = model(**__lowerCAmelCase )
        self.assertTrue(outputs.loss is not None )

    def A ( self : Union[str, Any] ) -> List[Any]:
        """simple docstring"""
        a , a = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )

    def A ( self : List[str] ) -> Any:
        """simple docstring"""
        a , a = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            a = model_class(__lowerCAmelCase ).to(__lowerCAmelCase )
            a = model(**__lowerCAmelCase , output_attentions=__lowerCAmelCase )
            self.assertTrue(outputs.attentions is not None )

    def A ( self : Optional[Any] ) -> Union[str, Any]:
        """simple docstring"""
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        a = self.all_model_classes[1]
        a , a , a , a , a = self.model_tester.prepare_config_and_inputs()

        a = model_class(__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.train()

        a = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ).loss
        loss.backward()

    def A ( self : List[str] ) -> Union[str, Any]:
        """simple docstring"""
        a = self.all_model_classes[1]
        a , a , a , a , a = self.model_tester.prepare_config_and_inputs()

        a = True
        a = True

        a = model_class(__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.train()

        a = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase )

        a = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        a = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we retain the grad on these hidden states here; the original implementation doesn't
        a = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        a = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=__lowerCAmelCase )

        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )


A_ : int = 1E-4


def UpperCAmelCase__ ( ):
    '''simple docstring'''
    a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image


@require_vision
@slow
class _lowercase ( unittest.TestCase ):
    @cached_property
    def A ( self : int ) -> Optional[int]:
        """simple docstring"""
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" )
            if is_vision_available()
            else None
        )

    def A ( self : List[Any] ) -> Optional[Any]:
        """simple docstring"""
        a = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(__lowerCAmelCase )
        a = self.default_image_processor
        a = prepare_img()
        a = image_processor(__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase )
        a = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) )

        with torch.no_grad():
            a = model(**__lowerCAmelCase )

        a = torch.tensor(
            [[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(__lowerCAmelCase )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )

        a = torch.tensor(
            [[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(__lowerCAmelCase )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )

        a = torch.tensor(
            [[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(__lowerCAmelCase )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )

    def A ( self : str ) -> Union[str, Any]:
        """simple docstring"""
        a = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
            .to(__lowerCAmelCase )
            .eval()
        )
        a = self.default_image_processor
        a = prepare_img()
        a = image_processor(__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase )
        a = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) )

        with torch.no_grad():
            a = model(**__lowerCAmelCase )

        # masks_queries_logits
        a = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        a = [
            [-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3],
            [-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5],
            [-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2],
        ]
        a = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )

        # class_queries_logits
        a = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        a = torch.tensor(
            [
                [1.65_12E00, -5.25_72E00, -3.35_19E00],
                [3.61_69E-02, -5.90_25E00, -2.93_13E00],
                [1.07_66E-04, -7.76_30E00, -5.12_63E00],
            ] ).to(__lowerCAmelCase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )

    def A ( self : List[Any] ) -> Any:
        """simple docstring"""
        a = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" )
            .to(__lowerCAmelCase )
            .eval()
        )
        a = self.default_image_processor
        a = prepare_img()
        a = image_processor(__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase )
        a = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) )

        with torch.no_grad():
            a = model(**__lowerCAmelCase )

        # masks_queries_logits
        a = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        a = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]]
        a = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )

        # class_queries_logits
        a = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        a = torch.tensor(
            [[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(__lowerCAmelCase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )

    def A ( self : int ) -> Any:
        """simple docstring"""
        a = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
            .to(__lowerCAmelCase )
            .eval()
        )
        a = self.default_image_processor

        a = image_processor(
            [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="pt" , )

        a = inputs["pixel_values"].to(__lowerCAmelCase )
        a = [el.to(__lowerCAmelCase ) for el in inputs["mask_labels"]]
        a = [el.to(__lowerCAmelCase ) for el in inputs["class_labels"]]

        with torch.no_grad():
            a = model(**__lowerCAmelCase )

        self.assertTrue(outputs.loss is not None )
32
0
from __future__ import annotations


def UpperCAmelCase__ ( UpperCAmelCase__ :list[int] , UpperCAmelCase__ :list[int] , UpperCAmelCase__ :int ):
    '''simple docstring'''
    a = list(range(len(UpperCAmelCase__ ) ) )
    a = [v / w for v, w in zip(UpperCAmelCase__ , UpperCAmelCase__ )]
    index.sort(key=lambda UpperCAmelCase__ : ratio[i] , reverse=UpperCAmelCase__ )

    a = 0
    a = [0] * len(UpperCAmelCase__ )
    for i in index:
        if weight[i] <= capacity:
            a = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            a = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
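# A hedged, runnable restatement of the greedy fractional knapsack above,
# exercised on the classic example whose optimum is 240.0.
def fractional_knapsack(value, weight, capacity):
    # Take items in decreasing value/weight order; split the last one if needed.
    order = sorted(range(len(value)), key=lambda i: value[i] / weight[i], reverse=True)
    total, fractions = 0.0, [0.0] * len(value)
    for i in order:
        if weight[i] <= capacity:
            fractions[i] = 1.0
            total += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            total += value[i] * fractions[i]
            break
    return total, fractions


assert fractional_knapsack([60, 100, 120], [10, 20, 30], 50) == (240.0, [1.0, 1.0, 20 / 30])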
720
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class _lowercase ( unittest.TestCase ):
    def A ( self : Union[str, Any] ) -> int:
        """simple docstring"""
        a = [[1, 2, 4], [1, 2, 3, 4]]
        a = DisjunctiveConstraint(__lowerCAmelCase )
        self.assertTrue(isinstance(dc.token_ids , __lowerCAmelCase ) )

        with self.assertRaises(__lowerCAmelCase ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )

        with self.assertRaises(__lowerCAmelCase ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )

    def A ( self : Tuple ) -> Dict:
        """simple docstring"""
        a = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(__lowerCAmelCase ):
            DisjunctiveConstraint(__lowerCAmelCase )  # fails here

    def A ( self : int ) -> Any:
        """simple docstring"""
        a = [[1, 2, 3], [1, 2, 4]]
        a = DisjunctiveConstraint(__lowerCAmelCase )

        a , a , a = dc.update(1 )
        a = stepped is True and completed is False and reset is False
        self.assertTrue(__lowerCAmelCase )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )

        a , a , a = dc.update(2 )
        a = stepped is True and completed is False and reset is False
        self.assertTrue(__lowerCAmelCase )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )

        a , a , a = dc.update(3 )
        a = stepped is True and completed is True and reset is False
        self.assertTrue(__lowerCAmelCase )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )

    def A ( self : List[Any] ) -> List[Any]:
        """simple docstring"""
        a = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        a = DisjunctiveConstraint(__lowerCAmelCase )

        a , a , a = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )

        a , a , a = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )

        a , a , a = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )

        a , a , a = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )

        dc.reset()

        a , a , a = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )

        a , a , a = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )

        a , a , a = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
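# A hedged miniature of the disjunctive idea the tests above exercise: a
# token-by-token matcher that is satisfied once any one of several candidate
# sequences has been produced (simplified: no re-matching after a reset).
def first_completed(candidates, stream):
    progress = {tuple(c): 0 for c in candidates}
    for tok in stream:
        for seq in progress:
            if progress[seq] < len(seq) and seq[progress[seq]] == tok:
                progress[seq] += 1
                if progress[seq] == len(seq):
                    return list(seq)
            else:
                progress[seq] = 0
    return None


assert first_completed([[1, 2, 3], [1, 2, 4]], [9, 1, 2, 4]) == [1, 2, 4]
assert first_completed([[1, 2, 3], [1, 2, 4]], [1, 2, 9]) is None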
32
0
import unittest

from transformers import (
    MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
    TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
    Pipeline,
    ZeroShotClassificationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow

from .test_pipelines_common import ANY


# These 2 model types require different inputs than those of the usual text models.
A_ : List[str] = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}


@is_pipeline_test
class _lowercase ( unittest.TestCase ):
    _UpperCAmelCase = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    _UpperCAmelCase = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        _UpperCAmelCase = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        _UpperCAmelCase = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    def A ( self : str , __lowerCAmelCase : Dict , __lowerCAmelCase : int , __lowerCAmelCase : Tuple ) -> Any:
        """simple docstring"""
        a = ZeroShotClassificationPipeline(
            model=__lowerCAmelCase , tokenizer=__lowerCAmelCase , candidate_labels=["polics", "health"] )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]

    def A ( self : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Any ) -> Tuple:
        """simple docstring"""
        a = classifier("Who are you voting for in 2020?" , candidate_labels="politics" )
        self.assertEqual(__lowerCAmelCase , {"sequence": ANY(__lowerCAmelCase ), "labels": [ANY(__lowerCAmelCase )], "scores": [ANY(__lowerCAmelCase )]} )

        # No kwarg
        a = classifier("Who are you voting for in 2020?" , ["politics"] )
        self.assertEqual(__lowerCAmelCase , {"sequence": ANY(__lowerCAmelCase ), "labels": [ANY(__lowerCAmelCase )], "scores": [ANY(__lowerCAmelCase )]} )

        a = classifier("Who are you voting for in 2020?" , candidate_labels=["politics"] )
        self.assertEqual(__lowerCAmelCase , {"sequence": ANY(__lowerCAmelCase ), "labels": [ANY(__lowerCAmelCase )], "scores": [ANY(__lowerCAmelCase )]} )

        a = classifier("Who are you voting for in 2020?" , candidate_labels="politics, public health" )
        self.assertEqual(
            __lowerCAmelCase , {"sequence": ANY(__lowerCAmelCase ), "labels": [ANY(__lowerCAmelCase ), ANY(__lowerCAmelCase )], "scores": [ANY(__lowerCAmelCase ), ANY(__lowerCAmelCase )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )

        a = classifier("Who are you voting for in 2020?" , candidate_labels=["politics", "public health"] )
        self.assertEqual(
            __lowerCAmelCase , {"sequence": ANY(__lowerCAmelCase ), "labels": [ANY(__lowerCAmelCase ), ANY(__lowerCAmelCase )], "scores": [ANY(__lowerCAmelCase ), ANY(__lowerCAmelCase )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )

        a = classifier(
            "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="This text is about {}" )
        self.assertEqual(__lowerCAmelCase , {"sequence": ANY(__lowerCAmelCase ), "labels": [ANY(__lowerCAmelCase )], "scores": [ANY(__lowerCAmelCase )]} )

        # https://github.com/huggingface/transformers/issues/13846
        a = classifier(["I am happy"] , ["positive", "negative"] )
        self.assertEqual(
            __lowerCAmelCase , [
                {"sequence": ANY(__lowerCAmelCase ), "labels": [ANY(__lowerCAmelCase ), ANY(__lowerCAmelCase )], "scores": [ANY(__lowerCAmelCase ), ANY(__lowerCAmelCase )]}
                for i in range(1 )
            ] , )
        a = classifier(["I am happy", "I am sad"] , ["positive", "negative"] )
        self.assertEqual(
            __lowerCAmelCase , [
                {"sequence": ANY(__lowerCAmelCase ), "labels": [ANY(__lowerCAmelCase ), ANY(__lowerCAmelCase )], "scores": [ANY(__lowerCAmelCase ), ANY(__lowerCAmelCase )]}
                for i in range(2 )
            ] , )

        with self.assertRaises(__lowerCAmelCase ):
            classifier("" , candidate_labels="politics" )

        with self.assertRaises(__lowerCAmelCase ):
            classifier(__lowerCAmelCase , candidate_labels="politics" )

        with self.assertRaises(__lowerCAmelCase ):
            classifier("Who are you voting for in 2020?" , candidate_labels="" )

        with self.assertRaises(__lowerCAmelCase ):
            classifier("Who are you voting for in 2020?" , candidate_labels=__lowerCAmelCase )

        with self.assertRaises(__lowerCAmelCase ):
            classifier(
                "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="Not formatting template" , )

        with self.assertRaises(__lowerCAmelCase ):
            classifier(
                "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template=__lowerCAmelCase , )

        self.run_entailment_id(__lowerCAmelCase )

    def A ( self : List[str] , __lowerCAmelCase : Pipeline ) -> Dict:
        """simple docstring"""
        a = zero_shot_classifier.model.config
        a = config.labelaid
        a = zero_shot_classifier.entailment_id

        a = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id , -1 )

        a = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )

        a = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )

        a = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id , 2 )

        a = original_labelaid
        self.assertEqual(__lowerCAmelCase , zero_shot_classifier.entailment_id )

    @require_torch
    def A ( self : Tuple ) -> Optional[Any]:
        """simple docstring"""
        a = pipeline(
            "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            "Who are you voting for in 2020?" * 100 , candidate_labels=["politics", "public health", "science"] )

    @require_torch
    def A ( self : Optional[int] ) -> Union[str, Any]:
        """simple docstring"""
        a = pipeline(
            "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
        a = zero_shot_classifier(
            "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )

        self.assertEqual(
            nested_simplify(__lowerCAmelCase ) , {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.3_3_3, 0.3_3_3, 0.3_3_3],
            } , )

    @require_tf
    def A ( self : int ) -> Union[str, Any]:
        """simple docstring"""
        a = pipeline(
            "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="tf" , )
        a = zero_shot_classifier(
            "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )

        self.assertEqual(
            nested_simplify(__lowerCAmelCase ) , {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.3_3_3, 0.3_3_3, 0.3_3_3],
            } , )

    @slow
    @require_torch
    def A ( self : Optional[Any] ) -> Any:
        """simple docstring"""
        a = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="pt" )
        a = zero_shot_classifier(
            "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )

        self.assertEqual(
            nested_simplify(__lowerCAmelCase ) , {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.9_7_6, 0.0_1_5, 0.0_0_9],
            } , )

        a = zero_shot_classifier(
            "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
            " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
            " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
            " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
            " machine translation tasks show these models to be superior in quality while being more parallelizable"
            " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
            " English-to-German translation task, improving over the existing best results, including ensembles by"
            " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
            " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
            " fraction of the training costs of the best models from the literature. We show that the Transformer"
            " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
            " large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=__lowerCAmelCase , )
        self.assertEqual(
            nested_simplify(__lowerCAmelCase ) , {
                "sequence": (
                    "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                    " networks in an encoder-decoder configuration. The best performing models also connect the"
                    " encoder and decoder through an attention mechanism. We propose a new simple network"
                    " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                    " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                    " superior in quality while being more parallelizable and requiring significantly less time to"
                    " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                    " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                    " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                    " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                    " costs of the best models from the literature. We show that the Transformer generalizes well to"
                    " other tasks by applying it successfully to English constituency parsing both with large and"
                    " limited training data."
                ),
                "labels": ["translation", "machine learning", "vision", "statistics"],
                "scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
            } , )

    @slow
    @require_tf
    def A ( self : Optional[Any] ) -> Optional[Any]:
        """simple docstring"""
        a = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="tf" )
        a = zero_shot_classifier(
            "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )

        self.assertEqual(
            nested_simplify(__lowerCAmelCase ) , {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.9_7_6, 0.0_1_5, 0.0_0_9],
            } , )

        a = zero_shot_classifier(
            "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
            " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
            " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
            " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
            " machine translation tasks show these models to be superior in quality while being more parallelizable"
            " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
            " English-to-German translation task, improving over the existing best results, including ensembles by"
            " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
            " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
            " fraction of the training costs of the best models from the literature. We show that the Transformer"
            " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
            " large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=__lowerCAmelCase , )
        self.assertEqual(
            nested_simplify(__lowerCAmelCase ) , {
                "sequence": (
                    "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                    " networks in an encoder-decoder configuration. The best performing models also connect the"
                    " encoder and decoder through an attention mechanism. We propose a new simple network"
                    " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                    " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                    " superior in quality while being more parallelizable and requiring significantly less time to"
                    " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                    " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                    " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                    " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                    " costs of the best models from the literature. We show that the Transformer generalizes well to"
                    " other tasks by applying it successfully to English constituency parsing both with large and"
                    " limited training data."
                ),
                "labels": ["translation", "machine learning", "vision", "statistics"],
                "scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
            } , )
721
from __future__ import annotations def UpperCAmelCase__ ( UpperCAmelCase__ :int ): '''simple docstring''' a = str(UpperCAmelCase__ ) return len(UpperCAmelCase__ ) == 9 and set(UpperCAmelCase__ ) == set("123456789" ) def UpperCAmelCase__ ( ): '''simple docstring''' for base_num in range(99_99 , 49_99 , -1 ): a = 10_00_02 * base_num if is_9_pandigital(UpperCAmelCase__ ): return candidate for base_num in range(3_33 , 99 , -1 ): a = 1_00_20_03 * base_num if is_9_pandigital(UpperCAmelCase__ ): return candidate return None if __name__ == "__main__": print(F"""{solution() = }""")
32
0
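For readability, here is an equivalent sketch of the pandigital routine above with descriptive names (the names and sample values are illustrative, not from the source):

def is_9_pandigital(number: int) -> bool:
    # A number is 1-9 pandigital when it uses each of the digits 1..9 exactly once.
    digits = str(number)
    return len(digits) == 9 and set(digits) == set("123456789")

# Concatenating base * 1 and base * 2 for a 4-digit base gives base * 10**5 + 2 * base,
# i.e. 100002 * base, which is why the search above multiplies by 100002.
assert is_9_pandigital(100002 * 9327)       # 932718654, the largest such pandigital
assert not is_9_pandigital(100002 * 9999)   # 999919998 repeats digits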
import argparse import torch from transformers import ( WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForAudioFrameClassification, WavaVecaForSequenceClassification, WavaVecaForXVector, logging, ) logging.set_verbosity_info() A_ : Optional[int] = logging.get_logger(__name__) def UpperCAmelCase__ ( UpperCAmelCase__ :str , UpperCAmelCase__ :List[str] , UpperCAmelCase__ :List[Any] ): '''simple docstring''' a = WavaVecaForSequenceClassification.from_pretrained(UpperCAmelCase__ , config=UpperCAmelCase__ ) a = downstream_dict["projector.weight"] a = downstream_dict["projector.bias"] a = downstream_dict["model.post_net.linear.weight"] a = downstream_dict["model.post_net.linear.bias"] return model def UpperCAmelCase__ ( UpperCAmelCase__ :str , UpperCAmelCase__ :List[str] , UpperCAmelCase__ :Dict ): '''simple docstring''' a = WavaVecaForAudioFrameClassification.from_pretrained(UpperCAmelCase__ , config=UpperCAmelCase__ ) a = downstream_dict["model.linear.weight"] a = downstream_dict["model.linear.bias"] return model def UpperCAmelCase__ ( UpperCAmelCase__ :Optional[Any] , UpperCAmelCase__ :int , UpperCAmelCase__ :Any ): '''simple docstring''' a = WavaVecaForXVector.from_pretrained(UpperCAmelCase__ , config=UpperCAmelCase__ ) a = downstream_dict["connector.weight"] a = downstream_dict["connector.bias"] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): a = downstream_dict[ F"""model.framelevel_feature_extractor.module.{i}.kernel.weight""" ] a = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""] a = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"] a = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"] a = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"] a = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"] a = downstream_dict["objective.W"] return model @torch.no_grad() def UpperCAmelCase__ ( UpperCAmelCase__ :Tuple , UpperCAmelCase__ :str , UpperCAmelCase__ :int , UpperCAmelCase__ :int ): '''simple docstring''' a = torch.load(UpperCAmelCase__ , map_location="cpu" ) a = checkpoint["Downstream"] a = WavaVecaConfig.from_pretrained(UpperCAmelCase__ ) a = WavaVecaFeatureExtractor.from_pretrained( UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , do_normalize=UpperCAmelCase__ ) a = hf_config.architectures[0] if arch.endswith("ForSequenceClassification" ): a = convert_classification(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) elif arch.endswith("ForAudioFrameClassification" ): a = convert_diarization(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) elif arch.endswith("ForXVector" ): a = convert_xvector(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) else: raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" ) if hf_config.use_weighted_layer_sum: a = checkpoint["Featurizer"]["weights"] hf_feature_extractor.save_pretrained(UpperCAmelCase__ ) hf_model.save_pretrained(UpperCAmelCase__ ) if __name__ == "__main__": A_ : Any = argparse.ArgumentParser() parser.add_argument( '''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.''' ) parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''') parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted 
model.''') A_ : Any = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
700
import os import tempfile import unittest from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter from transformers.testing_utils import slow from transformers.utils import cached_property @unittest.skipUnless(os.path.exists(DEFAULT_REPO ), '''Tatoeba directory does not exist.''' ) class _lowercase ( unittest.TestCase ): @cached_property def A ( self : List[str] ) -> int: """simple docstring""" a = tempfile.mkdtemp() return TatoebaConverter(save_dir=__lowerCAmelCase ) @slow def A ( self : Optional[int] ) -> List[str]: """simple docstring""" self.resolver.convert_models(["heb-eng"] ) @slow def A ( self : Dict ) -> Any: """simple docstring""" a , a = self.resolver.write_model_card("opus-mt-he-en" , dry_run=__lowerCAmelCase ) assert mmeta["long_pair"] == "heb-eng"
32
0
import re from pathlib import Path from unittest import TestCase import pytest @pytest.mark.integration class _lowercase ( UpperCAmelCase__ ): def A ( self : Optional[int] , __lowerCAmelCase : str ) -> Union[str, Any]: """simple docstring""" with open(__lowerCAmelCase , encoding="utf-8" ) as input_file: a = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" ) a = input_file.read() a = regexp.search(__lowerCAmelCase ) return match def A ( self : List[Any] , __lowerCAmelCase : str ) -> Dict: """simple docstring""" with open(__lowerCAmelCase , encoding="utf-8" ) as input_file: a = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL ) a = input_file.read() # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search` a = regexp.finditer(__lowerCAmelCase ) a = [match for match in matches if match is not None and match.group(1 ) is not None] return matches[0] if matches else None def A ( self : List[str] ) -> List[Any]: """simple docstring""" a = Path("./datasets" ) a = list(dataset_paths.absolute().glob("**/*.py" ) ) for dataset in dataset_files: if self._no_encoding_on_file_open(str(__lowerCAmelCase ) ): raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""" ) def A ( self : Tuple ) -> Union[str, Any]: """simple docstring""" a = Path("./datasets" ) a = list(dataset_paths.absolute().glob("**/*.py" ) ) for dataset in dataset_files: if self._no_print_statements(str(__lowerCAmelCase ) ): raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
701
from ...configuration_utils import PretrainedConfig from ...utils import logging A_ : Any = logging.get_logger(__name__) A_ : Optional[int] = { '''SCUT-DLVCLab/lilt-roberta-en-base''': ( '''https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json''' ), } class _lowercase ( UpperCAmelCase__ ): _UpperCAmelCase = '''lilt''' def __init__( self : Union[str, Any] , __lowerCAmelCase : Optional[Any]=3_0522 , __lowerCAmelCase : str=768 , __lowerCAmelCase : Any=12 , __lowerCAmelCase : Optional[Any]=12 , __lowerCAmelCase : List[Any]=3072 , __lowerCAmelCase : Dict="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[str]=512 , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : int=0.0_2 , __lowerCAmelCase : Union[str, Any]=1E-12 , __lowerCAmelCase : Tuple=0 , __lowerCAmelCase : List[Any]="absolute" , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : Dict=1024 , **__lowerCAmelCase : Dict , ) -> int: """simple docstring""" super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase ) a = vocab_size a = hidden_size a = num_hidden_layers a = num_attention_heads a = hidden_act a = intermediate_size a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = type_vocab_size a = initializer_range a = layer_norm_eps a = position_embedding_type a = classifier_dropout a = channel_shrink_ratio a = max_ad_position_embeddings
32
0
import functools import logging import os import sys import threading from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional import huggingface_hub.utils as hf_hub_utils from tqdm import auto as tqdm_lib A_ : Tuple = threading.Lock() A_ : Optional[logging.Handler] = None A_ : List[Any] = { '''debug''': logging.DEBUG, '''info''': logging.INFO, '''warning''': logging.WARNING, '''error''': logging.ERROR, '''critical''': logging.CRITICAL, } A_ : Optional[Any] = logging.WARNING A_ : List[Any] = True def UpperCAmelCase__ ( ): '''simple docstring''' a = os.getenv("TRANSFORMERS_VERBOSITY" , UpperCAmelCase__ ) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( F"""Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, """ F"""has to be one of: { ", ".join(log_levels.keys() ) }""" ) return _default_log_level def UpperCAmelCase__ ( ): '''simple docstring''' return __name__.split("." )[0] def UpperCAmelCase__ ( ): '''simple docstring''' return logging.getLogger(_get_library_name() ) def UpperCAmelCase__ ( ): '''simple docstring''' global _default_handler with _lock: if _default_handler: # This library has already configured the library root logger. return a = logging.StreamHandler() # Set sys.stderr as stream. a = sys.stderr.flush # Apply our default configuration to the library root logger. a = _get_library_root_logger() library_root_logger.addHandler(_default_handler ) library_root_logger.setLevel(_get_default_logging_level() ) a = False def UpperCAmelCase__ ( ): '''simple docstring''' global _default_handler with _lock: if not _default_handler: return a = _get_library_root_logger() library_root_logger.removeHandler(_default_handler ) library_root_logger.setLevel(logging.NOTSET ) a = None def UpperCAmelCase__ ( ): '''simple docstring''' return log_levels def UpperCAmelCase__ ( UpperCAmelCase__ :Optional[str] = None ): '''simple docstring''' if name is None: a = _get_library_name() _configure_library_root_logger() return logging.getLogger(UpperCAmelCase__ ) def UpperCAmelCase__ ( ): '''simple docstring''' _configure_library_root_logger() return _get_library_root_logger().getEffectiveLevel() def UpperCAmelCase__ ( UpperCAmelCase__ :int ): '''simple docstring''' _configure_library_root_logger() _get_library_root_logger().setLevel(UpperCAmelCase__ ) def UpperCAmelCase__ ( ): '''simple docstring''' return set_verbosity(UpperCAmelCase__ ) def UpperCAmelCase__ ( ): '''simple docstring''' return set_verbosity(UpperCAmelCase__ ) def UpperCAmelCase__ ( ): '''simple docstring''' return set_verbosity(UpperCAmelCase__ ) def UpperCAmelCase__ ( ): '''simple docstring''' return set_verbosity(UpperCAmelCase__ ) def UpperCAmelCase__ ( ): '''simple docstring''' _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().removeHandler(_default_handler ) def UpperCAmelCase__ ( ): '''simple docstring''' _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().addHandler(_default_handler ) def UpperCAmelCase__ ( UpperCAmelCase__ :logging.Handler ): '''simple docstring''' _configure_library_root_logger() assert handler is not None _get_library_root_logger().addHandler(UpperCAmelCase__ ) def UpperCAmelCase__ ( UpperCAmelCase__ :logging.Handler ): '''simple docstring''' _configure_library_root_logger() assert handler is not None and handler not in 
_get_library_root_logger().handlers _get_library_root_logger().removeHandler(UpperCAmelCase__ ) def UpperCAmelCase__ ( ): '''simple docstring''' _configure_library_root_logger() a = False def UpperCAmelCase__ ( ): '''simple docstring''' _configure_library_root_logger() a = True def UpperCAmelCase__ ( ): '''simple docstring''' a = _get_library_root_logger().handlers for handler in handlers: a = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s" ) handler.setFormatter(UpperCAmelCase__ ) def UpperCAmelCase__ ( ): '''simple docstring''' a = _get_library_root_logger().handlers for handler in handlers: handler.setFormatter(UpperCAmelCase__ ) def UpperCAmelCase__ ( self :Any , *UpperCAmelCase__ :List[str] , **UpperCAmelCase__ :List[str] ): '''simple docstring''' a = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS" , UpperCAmelCase__ ) if no_advisory_warnings: return self.warning(*UpperCAmelCase__ , **UpperCAmelCase__ ) A_ : List[str] = warning_advice @functools.lru_cache(UpperCAmelCase__ ) def UpperCAmelCase__ ( self :Dict , *UpperCAmelCase__ :int , **UpperCAmelCase__ :int ): '''simple docstring''' self.warning(*UpperCAmelCase__ , **UpperCAmelCase__ ) A_ : List[Any] = warning_once class _lowercase : def __init__( self : str , *__lowerCAmelCase : Union[str, Any] , **__lowerCAmelCase : str ) -> Dict: # pylint: disable=unused-argument """simple docstring""" a = args[0] if args else None def __iter__( self : List[Any] ) -> Tuple: """simple docstring""" return iter(self._iterator ) def __getattr__( self : Dict , __lowerCAmelCase : str ) -> Optional[Any]: """simple docstring""" def empty_fn(*__lowerCAmelCase : Optional[int] , **__lowerCAmelCase : Optional[int] ): # pylint: disable=unused-argument return return empty_fn def __enter__( self : Tuple ) -> Optional[int]: """simple docstring""" return self def __exit__( self : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] ) -> List[str]: """simple docstring""" return class _lowercase : def __call__( self : List[Any] , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : Any ) -> Optional[int]: """simple docstring""" if _tqdm_active: return tqdm_lib.tqdm(*__lowerCAmelCase , **__lowerCAmelCase ) else: return EmptyTqdm(*__lowerCAmelCase , **__lowerCAmelCase ) def A ( self : Dict , *__lowerCAmelCase : Any , **__lowerCAmelCase : Dict ) -> Optional[int]: """simple docstring""" a = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*__lowerCAmelCase , **__lowerCAmelCase ) def A ( self : List[Any] ) -> Tuple: """simple docstring""" if _tqdm_active: return tqdm_lib.tqdm.get_lock() A_ : List[str] = _tqdm_cls() def UpperCAmelCase__ ( ): '''simple docstring''' global _tqdm_active return bool(_tqdm_active ) def UpperCAmelCase__ ( ): '''simple docstring''' global _tqdm_active a = True hf_hub_utils.enable_progress_bars() def UpperCAmelCase__ ( ): '''simple docstring''' global _tqdm_active a = False hf_hub_utils.disable_progress_bars()
702
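The module above mirrors the Transformers logging utilities; upstream, the same helpers are exposed as transformers.utils.logging. A minimal usage sketch (assumes the transformers package is installed; the helper names come from the upstream public API, since the names in this dump are mangled):

from transformers.utils import logging

logging.set_verbosity_info()            # route INFO and above to stderr
logger = logging.get_logger(__name__)   # child of the library root logger
logger.info("now visible")
logging.set_verbosity_error()           # silence everything below ERROR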
import argparse from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta from transformers.utils import logging logging.set_verbosity_info() def UpperCAmelCase__ ( UpperCAmelCase__ :Optional[int] , UpperCAmelCase__ :List[str] , UpperCAmelCase__ :Any ): '''simple docstring''' a = TaConfig.from_json_file(UpperCAmelCase__ ) print(F"""Building PyTorch model from configuration: {config}""" ) a = TaForConditionalGeneration(UpperCAmelCase__ ) # Load weights from tf checkpoint load_tf_weights_in_ta(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) # Save pytorch-model print(F"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(UpperCAmelCase__ ) if __name__ == "__main__": A_ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) A_ : Tuple = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
32
0
from sklearn.metrics import mean_squared_error import datasets A_ : int = '''\ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } ''' A_ : Optional[int] = '''\ Mean Squared Error(MSE) is the average of the square of difference between the predicted and actual values. ''' A_ : str = ''' Args: predictions: array-like of shape (n_samples,) or (n_samples, n_outputs) Estimated target values. references: array-like of shape (n_samples,) or (n_samples, n_outputs) Ground truth (correct) target values. sample_weight: array-like of shape (n_samples,), default=None Sample weights. multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average" Defines aggregating of multiple output values. Array-like value defines weights used to average errors. "raw_values" : Returns a full set of errors in case of multioutput input. "uniform_average" : Errors of all outputs are averaged with uniform weight. squared : bool, default=True If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value. Returns: mse : mean squared error. Examples: >>> mse_metric = datasets.load_metric("mse") >>> predictions = [2.5, 0.0, 2, 8] >>> references = [3, -0.5, 2, 7] >>> results = mse_metric.compute(predictions=predictions, references=references) >>> print(results) {\'mse\': 0.375} >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False) >>> print(rmse_result) {\'mse\': 0.6123724356957945} If you\'re using multi-dimensional lists, then set the config as follows : >>> mse_metric = datasets.load_metric("mse", "multilist") >>> predictions = [[0.5, 1], [-1, 1], [7, -6]] >>> references = [[0, 2], [-1, 2], [8, -5]] >>> results = mse_metric.compute(predictions=predictions, references=references) >>> print(results) {\'mse\': 0.7083333333333334} >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\') >>> print(results) # doctest: +NORMALIZE_WHITESPACE {\'mse\': array([0.41666667, 1. 
])} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION ) class _lowercase ( datasets.Metric ): def A ( self : Optional[Any] ) -> List[Any]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[ "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html" ] , ) def A ( self : Dict ) -> Tuple: """simple docstring""" if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value("float" ) ), "references": datasets.Sequence(datasets.Value("float" ) ), } else: return { "predictions": datasets.Value("float" ), "references": datasets.Value("float" ), } def A ( self : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any]=None , __lowerCAmelCase : str="uniform_average" , __lowerCAmelCase : List[Any]=True ) -> Any: """simple docstring""" a = mean_squared_error( __lowerCAmelCase , __lowerCAmelCase , sample_weight=__lowerCAmelCase , multioutput=__lowerCAmelCase , squared=__lowerCAmelCase ) return {"mse": mse}
703
def UpperCAmelCase__ ( UpperCAmelCase__ :int , UpperCAmelCase__ :int ): '''simple docstring''' if a < 0 or b < 0: raise ValueError("the value of both inputs must be positive" ) a = str(bin(UpperCAmelCase__ ) )[2:] # remove the leading "0b" a = str(bin(UpperCAmelCase__ ) )[2:] # remove the leading "0b" a = max(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) ) return "0b" + "".join( str(int(char_a == "1" and char_b == "1" ) ) for char_a, char_b in zip(a_binary.zfill(UpperCAmelCase__ ) , b_binary.zfill(UpperCAmelCase__ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
32
0
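A cleaner, runnable sketch of the same bitwise-AND-as-string idea (names are illustrative):

def binary_and(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_bin, b_bin = bin(a)[2:], bin(b)[2:]
    width = max(len(a_bin), len(b_bin))
    return "0b" + "".join(
        str(int(x == "1" and y == "1"))
        for x, y in zip(a_bin.zfill(width), b_bin.zfill(width))
    )

assert binary_and(25, 32) == "0b000000"        # 11001 & 100000 share no bits
assert int(binary_and(25, 27), 2) == 25 & 27   # agrees with the built-in operator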
def UpperCAmelCase__ ( UpperCAmelCase__ :int ): '''simple docstring''' if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): raise TypeError("only integers accepted as input" ) else: a = str(abs(UpperCAmelCase__ ) ) a = [list(UpperCAmelCase__ ) for char in range(len(UpperCAmelCase__ ) )] for index in range(len(UpperCAmelCase__ ) ): num_transpositions[index].pop(UpperCAmelCase__ ) return max( int("".join(list(UpperCAmelCase__ ) ) ) for transposition in num_transpositions ) if __name__ == "__main__": __import__('''doctest''').testmod()
704
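The routine above computes the largest value reachable by deleting exactly one digit; with slicing instead of list.pop, the same idea reads as follows (a sketch, names illustrative):

def max_after_removing_one_digit(num: int) -> int:
    digits = str(abs(num))
    return max(int(digits[:i] + digits[i + 1:]) for i in range(len(digits)))

assert max_after_removing_one_digit(2736) == 736   # drop the leading 2
assert max_after_removing_one_digit(-9087) == 987  # sign is ignored; drop the 0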
from __future__ import annotations from collections.abc import Iterable, Iterator from dataclasses import dataclass A_ : List[str] = (3, 9, -11, 0, 7, 5, 1, -1) A_ : Optional[int] = (4, 6, 2, 0, 8, 10, 3, -2) @dataclass class _lowercase : _UpperCAmelCase = 42 _UpperCAmelCase = 42 class _lowercase : def __init__( self : List[Any] , __lowerCAmelCase : Iterable[int] ) -> None: """simple docstring""" a = None for i in sorted(__lowerCAmelCase , reverse=__lowerCAmelCase ): a = Node(__lowerCAmelCase , self.head ) def __iter__( self : Union[str, Any] ) -> Iterator[int]: """simple docstring""" a = self.head while node: yield node.data a = node.next_node def __len__( self : Tuple ) -> int: """simple docstring""" return sum(1 for _ in self ) def __str__( self : Union[str, Any] ) -> str: """simple docstring""" return " -> ".join([str(__lowerCAmelCase ) for node in self] ) def UpperCAmelCase__ ( UpperCAmelCase__ :SortedLinkedList , UpperCAmelCase__ :SortedLinkedList ): '''simple docstring''' return SortedLinkedList(list(UpperCAmelCase__ ) + list(UpperCAmelCase__ ) ) if __name__ == "__main__": import doctest doctest.testmod() A_ : Optional[Any] = SortedLinkedList print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
32
0
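merge_lists above concatenates both lists and re-sorts, which is O((n+m) log(n+m)); since each SortedLinkedList already yields its values in order, the standard library can merge them in linear time instead (a sketch):

import heapq

def merge_sorted(a, b):
    return list(heapq.merge(a, b))   # linear-time merge of two sorted iterables

assert merge_sorted([-11, 0, 3, 9], [-2, 2, 4, 10]) == [-11, -2, 0, 2, 3, 4, 9, 10]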
import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def UpperCAmelCase__ ( UpperCAmelCase__ :Dict , UpperCAmelCase__ :str , UpperCAmelCase__ :List[Any]=None ): '''simple docstring''' assert torch_layer.weight.shape == weight.shape, F"""{torch_layer} layer.weight does not match""" a = nn.Parameter(UpperCAmelCase__ ) if bias is not None: assert torch_layer.bias.shape == bias.shape, F"""{torch_layer} layer.bias does not match""" a = nn.Parameter(UpperCAmelCase__ ) def UpperCAmelCase__ ( UpperCAmelCase__ :List[Any] , UpperCAmelCase__ :Optional[Any] , UpperCAmelCase__ :int ): '''simple docstring''' a = np.asarray(weights[0] ) a = np.asarray(weights[1] ) a = np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key , torch.tensor(UpperCAmelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCAmelCase__ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(UpperCAmelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCAmelCase__ ) , ) set_param( torch_layer.output.dense , torch.tensor(UpperCAmelCase__ ).view(-1 , UpperCAmelCase__ ).contiguous().transpose(0 , 1 ) , ) def UpperCAmelCase__ ( UpperCAmelCase__ :Optional[int] , UpperCAmelCase__ :Any , UpperCAmelCase__ :Tuple ): '''simple docstring''' a = np.asarray(weights[0] ) a = np.asarray(weights[1] ) a = np.asarray(weights[2] ) a = np.asarray(weights[3] ) set_param( torch_layer.self_attention.query , torch.tensor(UpperCAmelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCAmelCase__ ) , ) set_param( torch_layer.self_attention.key , torch.tensor(UpperCAmelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCAmelCase__ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(UpperCAmelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCAmelCase__ ) , ) set_param( torch_layer.output.dense , torch.tensor(UpperCAmelCase__ ).view(-1 , UpperCAmelCase__ ).contiguous().transpose(0 , 1 ) , ) def UpperCAmelCase__ ( UpperCAmelCase__ :Tuple , UpperCAmelCase__ :Tuple , UpperCAmelCase__ :Optional[Any] ): '''simple docstring''' a = weights[0][0][0] a = np.asarray(layer_norm_a[0] ) a = np.asarray(layer_norm_a[1] ) set_param( torch_block.attention.layer_norm , torch.tensor(UpperCAmelCase__ ) , torch.tensor(UpperCAmelCase__ ) , ) # lsh weights + output a = weights[0][1] if len(UpperCAmelCase__ ) < 4: set_layer_weights_in_torch_lsh(UpperCAmelCase__ , torch_block.attention , UpperCAmelCase__ ) else: set_layer_weights_in_torch_local(UpperCAmelCase__ , torch_block.attention , UpperCAmelCase__ ) # intermediate weighs a = weights[2][0][1][2] # Chunked Feed Forward if len(UpperCAmelCase__ ) == 4: a = intermediate_weights[2] # layernorm 2 a = np.asarray(intermediate_weights[0][0] ) a = np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm , torch.tensor(UpperCAmelCase__ ) , torch.tensor(UpperCAmelCase__ ) , ) # intermediate dense a = np.asarray(intermediate_weights[1][0] ) a = np.asarray(intermediate_weights[1][1] ) set_param( torch_block.feed_forward.dense.dense , torch.tensor(UpperCAmelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCAmelCase__ ) , ) # intermediate out a = np.asarray(intermediate_weights[4][0] ) a = np.asarray(intermediate_weights[4][1] ) set_param( torch_block.feed_forward.output.dense , torch.tensor(UpperCAmelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCAmelCase__ ) , ) def 
UpperCAmelCase__ ( UpperCAmelCase__ :Optional[int] , UpperCAmelCase__ :List[Any] , UpperCAmelCase__ :int ): '''simple docstring''' a = torch_model.reformer # word embeds a = np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings , torch.tensor(UpperCAmelCase__ ) , ) if isinstance(weights[3] , UpperCAmelCase__ ): a = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): a = np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), F"""{position_embeddings[emb_idx]} emb does not match""" a = nn.Parameter(torch.tensor(UpperCAmelCase__ ) ) a = weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( UpperCAmelCase__ ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): a = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) # output layer norm a = np.asarray(weights[7][0] ) a = np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm , torch.tensor(UpperCAmelCase__ ) , torch.tensor(UpperCAmelCase__ ) , ) # output embeddings a = np.asarray(weights[9][0] ) a = np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder , torch.tensor(UpperCAmelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCAmelCase__ ) , ) def UpperCAmelCase__ ( UpperCAmelCase__ :int , UpperCAmelCase__ :List[str] , UpperCAmelCase__ :Dict ): '''simple docstring''' a = ReformerConfig.from_json_file(UpperCAmelCase__ ) print(F"""Building PyTorch model from configuration: {config}""" ) a = ReformerModelWithLMHead(UpperCAmelCase__ ) with open(UpperCAmelCase__ , "rb" ) as f: a = pickle.load(UpperCAmelCase__ )["weights"] set_model_weights_in_torch(UpperCAmelCase__ , UpperCAmelCase__ , config.hidden_size ) # Save pytorch-model print(F"""Save PyTorch model to {pytorch_dump_path}""" ) torch.save(model.state_dict() , UpperCAmelCase__ ) if __name__ == "__main__": A_ : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained Reformer model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) A_ : Optional[Any] = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
705
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
32
0
A_ : Any = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5] A_ : Tuple = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5] A_ : Optional[int] = { 0: '''Sunday''', 1: '''Monday''', 2: '''Tuesday''', 3: '''Wednesday''', 4: '''Thursday''', 5: '''Friday''', 6: '''Saturday''', } def UpperCAmelCase__ ( UpperCAmelCase__ :int , UpperCAmelCase__ :int , UpperCAmelCase__ :int ): '''simple docstring''' assert len(str(UpperCAmelCase__ ) ) > 2, "year should be in YYYY format" assert 1 <= month <= 12, "month should be between 1 to 12" assert 1 <= day <= 31, "day should be between 1 to 31" # Doomsday algorithm: a = year // 1_00 a = (5 * (century % 4) + 2) % 7 a = year % 1_00 a = centurian % 12 a = ( (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor ) % 7 a = ( DOOMSDAY_NOT_LEAP[month - 1] if (year % 4 != 0) or (centurian == 0 and (year % 4_00) != 0) else DOOMSDAY_LEAP[month - 1] ) a = (dooms_day + day - day_anchor) % 7 return WEEK_DAY_NAMES[week_day] if __name__ == "__main__": import doctest doctest.testmod()
706
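A quick cross-check of the doomsday computation above against the standard library (calendar.weekday numbers Monday as 0, while the table above numbers Sunday as 0):

import calendar

# Walk-through for 2000-01-01 with the formulas above:
#   century anchor = (5 * (20 % 4) + 2) % 7 = 2   (the 2000s doomsday is Tuesday)
#   2000 is a leap year, so January's anchor date is the 4th
#   week day       = (2 + 1 - 4) % 7 = 6          -> "Saturday"
assert calendar.weekday(2000, 1, 1) == 5   # Saturday in Monday==0 numbering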
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices A_ : int = logging.get_logger(__name__) A_ : str = { '''microsoft/focalnet-tiny''': '''https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json''', } class _lowercase ( UpperCAmelCase__, UpperCAmelCase__ ): _UpperCAmelCase = '''focalnet''' def __init__( self : int , __lowerCAmelCase : Optional[Any]=224 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : Any=3 , __lowerCAmelCase : Tuple=96 , __lowerCAmelCase : Dict=False , __lowerCAmelCase : Optional[int]=[192, 384, 768, 768] , __lowerCAmelCase : Union[str, Any]=[2, 2, 6, 2] , __lowerCAmelCase : Optional[int]=[2, 2, 2, 2] , __lowerCAmelCase : Union[str, Any]=[3, 3, 3, 3] , __lowerCAmelCase : str="gelu" , __lowerCAmelCase : Any=4.0 , __lowerCAmelCase : Optional[int]=0.0 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : str=False , __lowerCAmelCase : Optional[int]=1E-4 , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : str=False , __lowerCAmelCase : Any=0.0_2 , __lowerCAmelCase : str=1E-5 , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Dict=None , __lowerCAmelCase : str=None , **__lowerCAmelCase : Any , ) -> List[str]: """simple docstring""" super().__init__(**__lowerCAmelCase ) a = image_size a = patch_size a = num_channels a = embed_dim a = use_conv_embed a = hidden_sizes a = depths a = focal_levels a = focal_windows a = hidden_act a = mlp_ratio a = hidden_dropout_prob a = drop_path_rate a = use_layerscale a = layerscale_value a = use_post_layernorm a = use_post_layernorm_in_modulation a = normalize_modulator a = initializer_range a = layer_norm_eps a = encoder_stride a = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )] a , a = get_aligned_output_features_output_indices( out_features=__lowerCAmelCase , out_indices=__lowerCAmelCase , stage_names=self.stage_names )
32
0
import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock import torch from accelerate.utils import write_basic_config from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device from transformers.utils import is_apex_available logging.basicConfig(level=logging.DEBUG) A_ : Dict = logging.getLogger() def UpperCAmelCase__ ( ) -> Union[str, Any]: '''simple docstring''' a = argparse.ArgumentParser() parser.add_argument("-f" ) a = parser.parse_args() return args.f def UpperCAmelCase__ ( UpperCAmelCase__ :List[str] ) -> Dict: '''simple docstring''' a = {} a = os.path.join(UpperCAmelCase__ , "all_results.json" ) if os.path.exists(UpperCAmelCase__ ): with open(UpperCAmelCase__ , "r" ) as f: a = json.load(UpperCAmelCase__ ) else: raise ValueError(F"""can't find {path}""" ) return results def UpperCAmelCase__ ( ) -> Tuple: '''simple docstring''' a = torch.cuda.is_available() and torch_device == "cuda" return is_using_cuda and is_apex_available() A_ : Tuple = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class _lowercase ( UpperCAmelCase__ ): @classmethod def A ( cls : Union[str, Any] ) -> List[Any]: """simple docstring""" a = tempfile.mkdtemp() a = os.path.join(cls.tmpdir , "default_config.yml" ) write_basic_config(save_location=cls.configPath ) a = ["accelerate", "launch", "--config_file", cls.configPath] @classmethod def A ( cls : List[Any] ) -> Optional[Any]: """simple docstring""" shutil.rmtree(cls.tmpdir ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def A ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" a = self.get_auto_remove_tmp_dir() a = f""" {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --seed=42 --checkpointing_steps epoch --with_tracking """.split() if is_cuda_and_apex_available(): testargs.append("--fp16" ) run_command(self._launch_args + testargs ) a = get_results(__lowerCAmelCase ) self.assertGreaterEqual(result["eval_accuracy"] , 0.7_5 ) self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase , "glue_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def A ( self : List[str] ) -> Tuple: """simple docstring""" a = self.get_auto_remove_tmp_dir() a = f""" {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --block_size 128 --per_device_train_batch_size 5 --per_device_eval_batch_size 5 --num_train_epochs 2 --output_dir {tmp_dir} --checkpointing_steps epoch --with_tracking """.split() if torch.cuda.device_count() > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. 
return run_command(self._launch_args + testargs ) a = get_results(__lowerCAmelCase ) self.assertLess(result["perplexity"] , 100 ) self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase , "clm_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def A ( self : List[Any] ) -> Any: """simple docstring""" a = self.get_auto_remove_tmp_dir() a = f""" {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --num_train_epochs=1 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) a = get_results(__lowerCAmelCase ) self.assertLess(result["perplexity"] , 42 ) self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase , "mlm_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def A ( self : Tuple ) -> List[str]: """simple docstring""" a = 7 if get_gpu_count() > 1 else 2 a = self.get_auto_remove_tmp_dir() a = f""" {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) a = get_results(__lowerCAmelCase ) self.assertGreaterEqual(result["eval_accuracy"] , 0.7_5 ) self.assertLess(result["train_loss"] , 0.5 ) self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase , "ner_no_trainer" ) ) ) @unittest.skip(reason="Fix me @muellerzr" ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def A ( self : Union[str, Any] ) -> int: """simple docstring""" a = self.get_auto_remove_tmp_dir() a = f""" {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --seed=42 --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) a = get_results(__lowerCAmelCase ) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. 
self.assertGreaterEqual(result["eval_f1"] , 28 ) self.assertGreaterEqual(result["eval_exact"] , 28 ) self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase , "qa_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def A ( self : Any ) -> Optional[Any]: """simple docstring""" a = self.get_auto_remove_tmp_dir() a = f""" {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/swag/sample.json --validation_file tests/fixtures/tests_samples/swag/sample.json --output_dir {tmp_dir} --max_train_steps=20 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --with_tracking """.split() run_command(self._launch_args + testargs ) a = get_results(__lowerCAmelCase ) self.assertGreaterEqual(result["eval_accuracy"] , 0.8 ) self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase , "swag_no_trainer" ) ) ) @slow @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def A ( self : Union[str, Any] ) -> str: """simple docstring""" a = self.get_auto_remove_tmp_dir() a = f""" {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) a = get_results(__lowerCAmelCase ) self.assertGreaterEqual(result["eval_rouge1"] , 10 ) self.assertGreaterEqual(result["eval_rouge2"] , 2 ) self.assertGreaterEqual(result["eval_rougeL"] , 7 ) self.assertGreaterEqual(result["eval_rougeLsum"] , 7 ) self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase , "summarization_no_trainer" ) ) ) @slow @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def A ( self : List[Any] ) -> Dict: """simple docstring""" a = self.get_auto_remove_tmp_dir() a = f""" {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py --model_name_or_path sshleifer/student_marian_en_ro_6_1 --source_lang en --target_lang ro --train_file tests/fixtures/tests_samples/wmt16/sample.json --validation_file tests/fixtures/tests_samples/wmt16/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --num_beams=6 --learning_rate=3e-3 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --source_lang en_XX --target_lang ro_RO --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) a = get_results(__lowerCAmelCase ) self.assertGreaterEqual(result["eval_bleu"] , 30 ) self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase , "translation_no_trainer" ) ) ) @slow def A ( self : Tuple ) -> Tuple: """simple docstring""" a = logging.StreamHandler(sys.stdout ) logger.addHandler(__lowerCAmelCase ) a = self.get_auto_remove_tmp_dir() a = f""" {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py --dataset_name huggingface/semantic-segmentation-test-sample --output_dir {tmp_dir} --max_train_steps=10 --num_warmup_steps=2 
--learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch """.split() run_command(self._launch_args + testargs ) a = get_results(__lowerCAmelCase ) self.assertGreaterEqual(result["eval_overall_accuracy"] , 0.1_0 ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def A ( self : List[Any] ) -> List[str]: """simple docstring""" a = self.get_auto_remove_tmp_dir() a = f""" {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py --model_name_or_path google/vit-base-patch16-224-in21k --dataset_name hf-internal-testing/cats_vs_dogs_sample --learning_rate 1e-4 --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --max_train_steps 2 --train_val_split 0.1 --seed 42 --output_dir {tmp_dir} --with_tracking --checkpointing_steps 1 """.split() if is_cuda_and_apex_available(): testargs.append("--fp16" ) run_command(self._launch_args + testargs ) a = get_results(__lowerCAmelCase ) # The base model scores a 25% self.assertGreaterEqual(result["eval_accuracy"] , 0.6 ) self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase , "step_1" ) ) ) self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase , "image_classification_no_trainer" ) ) )
707
def UpperCAmelCase__ ( UpperCAmelCase__ :Any ): '''simple docstring''' if not head: return True # split the list to two parts a , a = head.next, head while fast and fast.next: a = fast.next.next a = slow.next a = slow.next a = None # Don't forget here! But forget still works! # reverse the second part a = None while second: a = second.next a = node a = second a = nxt # compare two parts # second part has the same or one less node while node: if node.val != head.val: return False a = node.next a = head.next return True def UpperCAmelCase__ ( UpperCAmelCase__ :str ): '''simple docstring''' if not head or not head.next: return True # 1. Get the midpoint (slow) a = a = a = head while fast and fast.next: a , a = fast.next.next, slow.next # 2. Push the second half into the stack a = [slow.val] while slow.next: a = slow.next stack.append(slow.val ) # 3. Comparison while stack: if stack.pop() != cur.val: return False a = cur.next return True def UpperCAmelCase__ ( UpperCAmelCase__ :Any ): '''simple docstring''' if not head or not head.next: return True a = {} a = 0 while head: if head.val in d: d[head.val].append(UpperCAmelCase__ ) else: a = [pos] a = head.next pos += 1 a = pos - 1 a = 0 for v in d.values(): if len(UpperCAmelCase__ ) % 2 != 0: middle += 1 else: a = 0 for i in range(0 , len(UpperCAmelCase__ ) ): if v[i] + v[len(UpperCAmelCase__ ) - 1 - step] != checksum: return False step += 1 if middle > 1: return False return True
32
0
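A self-contained, runnable sketch of the reverse-second-half strategy used by the first implementation above (the node class and helper are illustrative):

class Node:
    def __init__(self, val, nxt=None):
        self.val, self.next = val, nxt

def is_palindrome(head: "Node | None") -> bool:
    if head is None:
        return True
    # Find the middle: slow stops at the second half's head.
    slow = fast = head
    while fast and fast.next:
        slow, fast = slow.next, fast.next.next
    # Reverse the second half in place.
    prev = None
    while slow:
        slow.next, prev, slow = prev, slow, slow.next
    # Compare the first half against the reversed second half.
    left, right = head, prev
    while right:
        if left.val != right.val:
            return False
        left, right = left.next, right.next
    return True

def from_list(vals):
    head = None
    for v in reversed(vals):
        head = Node(v, head)
    return head

assert is_palindrome(from_list([1, 2, 2, 1]))
assert not is_palindrome(from_list([1, 2, 3]))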
'''simple docstring''' from __future__ import annotations def UpperCAmelCase__ ( UpperCAmelCase__ :int ): '''simple docstring''' a = str(UpperCAmelCase__ ) return len(UpperCAmelCase__ ) == 9 and set(UpperCAmelCase__ ) == set("123456789" ) def UpperCAmelCase__ ( ): '''simple docstring''' for base_num in range(99_99 , 49_99 , -1 ): a = 10_00_02 * base_num if is_9_pandigital(UpperCAmelCase__ ): return candidate for base_num in range(3_33 , 99 , -1 ): a = 1_00_20_03 * base_num if is_9_pandigital(UpperCAmelCase__ ): return candidate return None if __name__ == "__main__": print(F"""{solution() = }""")
708
import unittest from transformers import MobileBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertModel, ) class _lowercase : def __init__( self : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any=13 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : int=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : str=99 , __lowerCAmelCase : List[str]=64 , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Dict=5 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Optional[Any]=37 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : List[str]=512 , __lowerCAmelCase : List[Any]=16 , __lowerCAmelCase : Union[str, Any]=2 , __lowerCAmelCase : Optional[Any]=0.0_2 , __lowerCAmelCase : Dict=3 , __lowerCAmelCase : Optional[int]=4 , __lowerCAmelCase : Union[str, Any]=None , ) -> List[str]: """simple docstring""" a = parent a = batch_size a = seq_length a = is_training a = use_input_mask a = use_token_type_ids a = use_labels a = vocab_size a = hidden_size a = embedding_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = type_vocab_size a = type_sequence_label_size a = initializer_range a = num_labels a = num_choices a = scope def A ( self : Optional[int] ) -> Optional[int]: """simple docstring""" a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a = None if self.use_input_mask: a = random_attention_mask([self.batch_size, self.seq_length] ) a = None if self.use_token_type_ids: a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a = None a = None a = None if self.use_labels: a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a = ids_tensor([self.batch_size] , self.num_choices ) a = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A ( self : int ) -> List[str]: """simple docstring""" return MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , ) def A ( self : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : int , __lowerCAmelCase : Dict , 
__lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict ) -> Union[str, Any]: """simple docstring""" a = MobileBertModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) a = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) a = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def A ( self : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Any ) -> str: """simple docstring""" a = MobileBertForMaskedLM(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A ( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] ) -> List[str]: """simple docstring""" a = MobileBertForNextSentencePrediction(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def A ( self : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] ) -> List[Any]: """simple docstring""" a = MobileBertForPreTraining(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , next_sentence_label=__lowerCAmelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def A ( self : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ) -> Any: """simple docstring""" a = MobileBertForQuestionAnswering(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A ( self : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] ) -> Optional[int]: """simple docstring""" a = 
self.num_labels a = MobileBertForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ) -> Optional[Any]: """simple docstring""" a = self.num_labels a = MobileBertForTokenClassification(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A ( self : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] ) -> List[str]: """simple docstring""" a = self.num_choices a = MobileBertForMultipleChoice(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A ( self : List[Any] ) -> Dict: """simple docstring""" a = self.prepare_config_and_inputs() ( ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ) = config_and_inputs a = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ): _UpperCAmelCase = ( ( MobileBertModel, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, ) if is_torch_available() else () ) _UpperCAmelCase = ( { '''feature-extraction''': MobileBertModel, '''fill-mask''': MobileBertForMaskedLM, '''question-answering''': MobileBertForQuestionAnswering, '''text-classification''': MobileBertForSequenceClassification, '''token-classification''': MobileBertForTokenClassification, '''zero-shot''': MobileBertForSequenceClassification, } if is_torch_available() else {} ) _UpperCAmelCase = True def A ( self : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any=False ) -> Any: """simple docstring""" a = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) if return_labels: if model_class in get_values(__lowerCAmelCase ): a = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCAmelCase ) a = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase ) return inputs_dict def A ( self : Optional[int] ) -> List[Any]: """simple docstring""" a = MobileBertModelTester(self ) a = ConfigTester(self , 
config_class=__lowerCAmelCase , hidden_size=37 ) def A ( self : int ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() def A ( self : str ) -> Dict: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*__lowerCAmelCase ) def A ( self : str ) -> str: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*__lowerCAmelCase ) def A ( self : List[str] ) -> Dict: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__lowerCAmelCase ) def A ( self : int ) -> Dict: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__lowerCAmelCase ) def A ( self : List[Any] ) -> int: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*__lowerCAmelCase ) def A ( self : List[Any] ) -> Dict: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*__lowerCAmelCase ) def A ( self : List[Any] ) -> Optional[int]: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__lowerCAmelCase ) def A ( self : int ) -> Tuple: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*__lowerCAmelCase ) def UpperCAmelCase__ ( UpperCAmelCase__ :Dict ): '''simple docstring''' return torch.tensor( UpperCAmelCase__ , dtype=torch.long , device=UpperCAmelCase__ , ) A_ : Dict = 1E-3 @require_torch @require_sentencepiece @require_tokenizers class _lowercase ( unittest.TestCase ): @slow def A ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" a = MobileBertModel.from_pretrained("google/mobilebert-uncased" ).to(__lowerCAmelCase ) a = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] ) with torch.no_grad(): a = model(__lowerCAmelCase )[0] a = torch.Size((1, 9, 512) ) self.assertEqual(output.shape , __lowerCAmelCase ) a = torch.tensor( [ [ [-2.4_73_65_26E07, 8.2_69_16_56E04, 1.6_52_18_38E05], [-5.7_54_17_04E-01, 3.9_05_60_22E00, 4.4_01_15_07E00], [2.6_04_73_59E00, 1.5_67_76_52E00, -1.7_32_41_88E-01], ] ] , device=__lowerCAmelCase , ) # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a # ~1 difference, it's therefore not a good idea to measure using addition. # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE a = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE ) a = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE ) self.assertTrue(lower_bound and upper_bound )
32
0
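The MobileBERT integration check above bounds the elementwise ratio `expected / output` between `1 - TOLERANCE` and `1 + TOLERANCE` instead of subtracting, because the logits span roughly 1e0 to 1e8 and a single absolute-difference threshold is meaningless across that range. A minimal self-contained sketch of that ratio test follows; the tensor values are illustrative, not the model's.

import torch

TOLERANCE = 1e-3

def ratios_close(expected: torch.Tensor, actual: torch.Tensor, tol: float = TOLERANCE) -> bool:
    # Divide instead of subtracting: with values ranging from ~1e0 to ~1e8,
    # an absolute difference of 1 can be huge or negligible, while the ratio
    # is ~1 exactly when the relative error is small.
    ratio = expected / actual
    lower = torch.all(ratio >= 1 - tol)
    upper = torch.all(ratio <= 1 + tol)
    return bool(lower and upper)

expected = torch.tensor([1.0e8, 2.5, -0.173])
actual = expected * 1.0000005  # tiny relative perturbation
assert ratios_close(expected, actual)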
from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. A_ : Dict = 2_00 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. A_ : str = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. A_ : List[str] = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 10_00)) def UpperCAmelCase__ ( UpperCAmelCase__ :str , UpperCAmelCase__ :str ): '''simple docstring''' a = len([g for position, g in enumerate(UpperCAmelCase__ ) if g == main_target[position]] ) return (item, float(UpperCAmelCase__ )) def UpperCAmelCase__ ( UpperCAmelCase__ :str , UpperCAmelCase__ :str ): '''simple docstring''' a = random.randint(0 , len(UpperCAmelCase__ ) - 1 ) a = parent_a[:random_slice] + parent_a[random_slice:] a = parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def UpperCAmelCase__ ( UpperCAmelCase__ :str , UpperCAmelCase__ :list[str] ): '''simple docstring''' a = list(UpperCAmelCase__ ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: a = random.choice(UpperCAmelCase__ ) return "".join(UpperCAmelCase__ ) def UpperCAmelCase__ ( UpperCAmelCase__ :tuple[str, float] , UpperCAmelCase__ :list[tuple[str, float]] , UpperCAmelCase__ :list[str] , ): '''simple docstring''' a = [] # Generate more children proportionally to the fitness score. a = int(parent_a[1] * 1_00 ) + 1 a = 10 if child_n >= 10 else child_n for _ in range(UpperCAmelCase__ ): a = population_score[random.randint(0 , UpperCAmelCase__ )][0] a , a = crossover(parent_a[0] , UpperCAmelCase__ ) # Append new string to the population list. pop.append(mutate(UpperCAmelCase__ , UpperCAmelCase__ ) ) pop.append(mutate(UpperCAmelCase__ , UpperCAmelCase__ ) ) return pop def UpperCAmelCase__ ( UpperCAmelCase__ :str , UpperCAmelCase__ :list[str] , UpperCAmelCase__ :bool = True ): '''simple docstring''' if N_POPULATION < N_SELECTED: a = F"""{N_POPULATION} must be bigger than {N_SELECTED}""" raise ValueError(UpperCAmelCase__ ) # Verify that the target contains no genes besides the ones inside genes variable. a = sorted({c for c in target if c not in genes} ) if not_in_genes_list: a = F"""{not_in_genes_list} is not in genes list, evolution cannot converge""" raise ValueError(UpperCAmelCase__ ) # Generate random starting population. a = [] for _ in range(UpperCAmelCase__ ): population.append("".join([random.choice(UpperCAmelCase__ ) for i in range(len(UpperCAmelCase__ ) )] ) ) # Just some logs to know what the algorithms is doing. a , a = 0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(UpperCAmelCase__ ) # Random population created. Now it's time to evaluate. # Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. 
a = [evaluate(UpperCAmelCase__ , UpperCAmelCase__ ) for item in population] # Check if there is a matching evolution. a = sorted(UpperCAmelCase__ , key=lambda UpperCAmelCase__ : x[1] , reverse=UpperCAmelCase__ ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( F"""\nGeneration: {generation}""" F"""\nTotal Population:{total_population}""" F"""\nBest score: {population_score[0][1]}""" F"""\nBest string: {population_score[0][0]}""" ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. a = population[: int(N_POPULATION / 3 )] population.clear() population.extend(UpperCAmelCase__ ) # Normalize population score to be between 0 and 1. a = [ (item, score / len(UpperCAmelCase__ )) for item, score in population_score ] # This is selection for i in range(UpperCAmelCase__ ): population.extend(select(population_score[int(UpperCAmelCase__ )] , UpperCAmelCase__ , UpperCAmelCase__ ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. if len(UpperCAmelCase__ ) > N_POPULATION: break if __name__ == "__main__": A_ : int = ( '''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!''' ) A_ : Dict = list( ''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm''' '''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\''' ) A_ : int = basic(target_str, genes_list) print( F"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}""" )
709
import re from pathlib import Path from unittest import TestCase import pytest @pytest.mark.integration class _lowercase ( UpperCAmelCase__ ): def A ( self : Optional[int] , __lowerCAmelCase : str ) -> Union[str, Any]: """simple docstring""" with open(__lowerCAmelCase , encoding="utf-8" ) as input_file: a = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" ) a = input_file.read() a = regexp.search(__lowerCAmelCase ) return match def A ( self : List[Any] , __lowerCAmelCase : str ) -> Dict: """simple docstring""" with open(__lowerCAmelCase , encoding="utf-8" ) as input_file: a = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL ) a = input_file.read() # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search` a = regexp.finditer(__lowerCAmelCase ) a = [match for match in matches if match is not None and match.group(1 ) is not None] return matches[0] if matches else None def A ( self : List[str] ) -> List[Any]: """simple docstring""" a = Path("./datasets" ) a = list(dataset_paths.absolute().glob("**/*.py" ) ) for dataset in dataset_files: if self._no_encoding_on_file_open(str(__lowerCAmelCase ) ): raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""" ) def A ( self : Tuple ) -> Union[str, Any]: """simple docstring""" a = Path("./datasets" ) a = list(dataset_paths.absolute().glob("**/*.py" ) ) for dataset in dataset_files: if self._no_print_statements(str(__lowerCAmelCase ) ): raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
32
0
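In the genetic-algorithm snippet above every intermediate is obfuscated to `a`, which hides what `crossover` and `mutate` actually compute. The sketch below reconstructs the two operators with descriptive names (single-point crossover, one-gene mutation); it is an interpretation of the snippet, not the dataset's original text.

import random

def crossover(parent_1, parent_2):
    # Single-point crossover: the children swap tails at a random cut position.
    cut = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:cut] + parent_2[cut:]
    child_2 = parent_2[:cut] + parent_1[cut:]
    return child_1, child_2

def mutate(child, genes, probability=0.4):
    # With probability `probability`, replace one random position by a random gene.
    chars = list(child)
    if random.uniform(0, 1) < probability:
        chars[random.randint(0, len(chars) - 1)] = random.choice(genes)
    return "".join(chars)

first, second = crossover("hello world", "HELLO WORLD")
print(mutate(first, list("abcdefghijklmnopqrstuvwxyz ")))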
from datasets.utils.patching import _PatchedModuleObj, patch_submodule from . import _test_patching def UpperCAmelCase__ ( ): '''simple docstring''' import os as original_os from os import path as original_path from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join a = "__test_patch_submodule_mock__" with patch_submodule(_test_patching , "os.path.join" , UpperCAmelCase__ ): # Every way to access os.path.join must be patched, and the rest must stay untouched # check os.path.join assert isinstance(_test_patching.os , _PatchedModuleObj ) assert isinstance(_test_patching.os.path , _PatchedModuleObj ) assert _test_patching.os.path.join is mock # check path.join assert isinstance(_test_patching.path , _PatchedModuleObj ) assert _test_patching.path.join is mock # check join assert _test_patching.join is mock # check that the other attributes are untouched assert _test_patching.os.rename is original_rename assert _test_patching.path.dirname is original_dirname assert _test_patching.os.path.dirname is original_dirname # Even renamed modules or objects must be patched # check renamed_os.path.join assert isinstance(_test_patching.renamed_os , _PatchedModuleObj ) assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj ) assert _test_patching.renamed_os.path.join is mock # check renamed_path.join assert isinstance(_test_patching.renamed_path , _PatchedModuleObj ) assert _test_patching.renamed_path.join is mock # check renamed_join assert _test_patching.renamed_join is mock # check that the other attributes are untouched assert _test_patching.renamed_os.rename is original_rename assert _test_patching.renamed_path.dirname is original_dirname assert _test_patching.renamed_os.path.dirname is original_dirname # check that everthing is back to normal when the patch is over assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join def UpperCAmelCase__ ( ): '''simple docstring''' assert _test_patching.open is open a = "__test_patch_submodule_builtin_mock__" # _test_patching has "open" in its globals assert _test_patching.open is open with patch_submodule(_test_patching , "open" , UpperCAmelCase__ ): assert _test_patching.open is mock # check that everthing is back to normal when the patch is over assert _test_patching.open is open def UpperCAmelCase__ ( ): '''simple docstring''' a = "__test_patch_submodule_missing_mock__" with patch_submodule(_test_patching , "pandas.read_csv" , UpperCAmelCase__ ): pass def UpperCAmelCase__ ( ): '''simple docstring''' a = "__test_patch_submodule_missing_builtin_mock__" # _test_patching doesn't have "len" in its globals assert getattr(_test_patching , "len" , UpperCAmelCase__ ) is None with patch_submodule(_test_patching , "len" , UpperCAmelCase__ ): assert _test_patching.len is mock assert _test_patching.len is len def UpperCAmelCase__ ( ): '''simple docstring''' a = "__test_patch_submodule_start_and_stop_mock__" a = patch_submodule(_test_patching , "open" , UpperCAmelCase__ ) assert 
_test_patching.open is open patch.start() assert _test_patching.open is mock patch.stop() assert _test_patching.open is open def UpperCAmelCase__ ( ): '''simple docstring''' from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join a = "__test_patch_submodule_successive_join__" a = "__test_patch_submodule_successive_dirname__" a = "__test_patch_submodule_successive_rename__" assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename with patch_submodule(_test_patching , "os.path.join" , UpperCAmelCase__ ): with patch_submodule(_test_patching , "os.rename" , UpperCAmelCase__ ): with patch_submodule(_test_patching , "os.path.dirname" , UpperCAmelCase__ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename # try another order with patch_submodule(_test_patching , "os.rename" , UpperCAmelCase__ ): with patch_submodule(_test_patching , "os.path.join" , UpperCAmelCase__ ): with patch_submodule(_test_patching , "os.path.dirname" , UpperCAmelCase__ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename def UpperCAmelCase__ ( ): '''simple docstring''' a = "__test_patch_submodule_doesnt_exist_mock__" with patch_submodule(_test_patching , "__module_that_doesn_exist__.__attribute_that_doesn_exist__" , UpperCAmelCase__ ): pass with patch_submodule(_test_patching , "os.__attribute_that_doesn_exist__" , UpperCAmelCase__ ): pass
710
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available A_ : Optional[int] = { '''configuration_instructblip''': [ '''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''InstructBlipConfig''', '''InstructBlipQFormerConfig''', '''InstructBlipVisionConfig''', ], '''processing_instructblip''': ['''InstructBlipProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : List[Any] = [ '''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''InstructBlipQFormerModel''', '''InstructBlipPreTrainedModel''', '''InstructBlipForConditionalGeneration''', '''InstructBlipVisionModel''', ] if TYPE_CHECKING: from .configuration_instructblip import ( INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig, ) from .processing_instructblip import InstructBlipProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_instructblip import ( INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST, InstructBlipForConditionalGeneration, InstructBlipPreTrainedModel, InstructBlipQFormerModel, InstructBlipVisionModel, ) else: import sys A_ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
32
0
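The patching tests above exercise `datasets.utils.patching.patch_submodule` both as a context manager and via `start()`/`stop()`. A condensed usage sketch follows; `shutil` is chosen as the patch target only because it does `import os` internally, standing in for the tests' `_test_patching` helper module, so treat the target choice as an assumption.

import shutil  # any module that does `import os` internally will do

from datasets.utils.patching import patch_submodule

def fake_join(*parts):
    return "<patched>/" + "/".join(parts)

# Context-manager style: the patch is undone on exit.
with patch_submodule(shutil, "os.path.join", fake_join):
    assert shutil.os.path.join is fake_join
assert shutil.os.path.join is not fake_join

# Explicit start()/stop() style, as in the last test above.
patch = patch_submodule(shutil, "os.path.join", fake_join)
patch.start()
assert shutil.os.path.join is fake_join
patch.stop()
assert shutil.os.path.join is not fake_join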
import multiprocessing
import time

from arguments import PretokenizationArguments
from datasets import load_dataset

from transformers import AutoTokenizer, HfArgumentParser


def tokenize(example):
    """Tokenize one example and record its characters-per-token ratio."""
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
711
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class _lowercase ( UpperCAmelCase__ ): _UpperCAmelCase = (UniPCMultistepScheduler,) _UpperCAmelCase = (('''num_inference_steps''', 25),) def A ( self : List[Any] , **__lowerCAmelCase : Optional[int] ) -> int: """simple docstring""" a = { "num_train_timesteps": 1000, "beta_start": 0.0_0_0_1, "beta_end": 0.0_2, "beta_schedule": "linear", "solver_order": 2, "solver_type": "bh2", } config.update(**__lowerCAmelCase ) return config def A ( self : List[Any] , __lowerCAmelCase : Optional[int]=0 , **__lowerCAmelCase : Optional[Any] ) -> int: """simple docstring""" a = dict(self.forward_default_kwargs ) a = kwargs.pop("num_inference_steps" , __lowerCAmelCase ) a = self.dummy_sample a = 0.1 * sample a = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config(**__lowerCAmelCase ) a = scheduler_class(**__lowerCAmelCase ) scheduler.set_timesteps(__lowerCAmelCase ) # copy over dummy past residuals a = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowerCAmelCase ) a = scheduler_class.from_pretrained(__lowerCAmelCase ) new_scheduler.set_timesteps(__lowerCAmelCase ) # copy over dummy past residuals a = dummy_past_residuals[: new_scheduler.config.solver_order] a , a = sample, sample for t in range(__lowerCAmelCase , time_step + scheduler.config.solver_order + 1 ): a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample a = new_scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def A ( self : List[Any] , __lowerCAmelCase : Optional[Any]=0 , **__lowerCAmelCase : List[Any] ) -> List[str]: """simple docstring""" a = dict(self.forward_default_kwargs ) a = kwargs.pop("num_inference_steps" , __lowerCAmelCase ) a = self.dummy_sample a = 0.1 * sample a = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config() a = scheduler_class(**__lowerCAmelCase ) scheduler.set_timesteps(__lowerCAmelCase ) # copy over dummy past residuals (must be after setting timesteps) a = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowerCAmelCase ) a = scheduler_class.from_pretrained(__lowerCAmelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(__lowerCAmelCase ) # copy over dummy past residual (must be after setting timesteps) a = dummy_past_residuals[: new_scheduler.config.solver_order] a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample a = new_scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def A ( self : str , __lowerCAmelCase : Any=None , **__lowerCAmelCase : List[str] ) -> Any: """simple docstring""" if scheduler is None: a = self.scheduler_classes[0] a = self.get_scheduler_config(**__lowerCAmelCase ) a = scheduler_class(**__lowerCAmelCase ) a = self.scheduler_classes[0] a = 
self.get_scheduler_config(**__lowerCAmelCase ) a = scheduler_class(**__lowerCAmelCase ) a = 10 a = self.dummy_model() a = self.dummy_sample_deter scheduler.set_timesteps(__lowerCAmelCase ) for i, t in enumerate(scheduler.timesteps ): a = model(__lowerCAmelCase , __lowerCAmelCase ) a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample return sample def A ( self : Any ) -> int: """simple docstring""" a = dict(self.forward_default_kwargs ) a = kwargs.pop("num_inference_steps" , __lowerCAmelCase ) for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config() a = scheduler_class(**__lowerCAmelCase ) a = self.dummy_sample a = 0.1 * sample if num_inference_steps is not None and hasattr(__lowerCAmelCase , "set_timesteps" ): scheduler.set_timesteps(__lowerCAmelCase ) elif num_inference_steps is not None and not hasattr(__lowerCAmelCase , "set_timesteps" ): a = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) a = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] a = dummy_past_residuals[: scheduler.config.solver_order] a = scheduler.timesteps[5] a = scheduler.timesteps[6] a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def A ( self : List[str] ) -> Dict: """simple docstring""" a = UniPCMultistepScheduler(**self.get_scheduler_config() ) a = self.full_loop(scheduler=__lowerCAmelCase ) a = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3 a = DPMSolverSinglestepScheduler.from_config(scheduler.config ) a = DEISMultistepScheduler.from_config(scheduler.config ) a = DPMSolverMultistepScheduler.from_config(scheduler.config ) a = UniPCMultistepScheduler.from_config(scheduler.config ) a = self.full_loop(scheduler=__lowerCAmelCase ) a = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3 def A ( self : List[Any] ) -> Dict: """simple docstring""" for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=__lowerCAmelCase ) def A ( self : Optional[Any] ) -> Tuple: """simple docstring""" self.check_over_configs(thresholding=__lowerCAmelCase ) for order in [1, 2, 3]: for solver_type in ["bh1", "bh2"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=__lowerCAmelCase , prediction_type=__lowerCAmelCase , sample_max_value=__lowerCAmelCase , solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , ) def A ( self : Optional[Any] ) -> Any: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__lowerCAmelCase ) def A ( self : Optional[Any] ) -> Any: """simple docstring""" for solver_type in ["bh1", "bh2"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , prediction_type=__lowerCAmelCase , ) a = self.full_loop( solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , prediction_type=__lowerCAmelCase , ) assert not torch.isnan(__lowerCAmelCase ).any(), "Samples have nan numbers" def A ( self : Optional[int] ) -> Any: """simple docstring""" self.check_over_configs(lower_order_final=__lowerCAmelCase ) 
self.check_over_configs(lower_order_final=__lowerCAmelCase ) def A ( self : Dict ) -> str: """simple docstring""" for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=__lowerCAmelCase , time_step=0 ) def A ( self : Dict ) -> int: """simple docstring""" a = self.full_loop() a = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3 def A ( self : Optional[int] ) -> int: """simple docstring""" a = self.full_loop(prediction_type="v_prediction" ) a = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_mean.item() - 0.1_0_1_4 ) < 1E-3 def A ( self : Union[str, Any] ) -> str: """simple docstring""" a = self.scheduler_classes[0] a = self.get_scheduler_config(thresholding=__lowerCAmelCase , dynamic_thresholding_ratio=0 ) a = scheduler_class(**__lowerCAmelCase ) a = 10 a = self.dummy_model() a = self.dummy_sample_deter.half() scheduler.set_timesteps(__lowerCAmelCase ) for i, t in enumerate(scheduler.timesteps ): a = model(__lowerCAmelCase , __lowerCAmelCase ) a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample assert sample.dtype == torch.floataa def A ( self : List[str] , **__lowerCAmelCase : int ) -> Dict: """simple docstring""" for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config(**__lowerCAmelCase ) a = scheduler_class(**__lowerCAmelCase ) scheduler.set_timesteps(scheduler.config.num_train_timesteps ) assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
32
0
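The scheduler tests above repeatedly drive a `set_timesteps` then `step(...).prev_sample` loop (their `full_loop` helper). Here is a minimal standalone sketch of that loop; a constant multiple of the sample stands in for a real denoising model, so the output is not meaningful, only the control flow is.

import torch
from diffusers import UniPCMultistepScheduler

# Same config knobs the tests use.
scheduler = UniPCMultistepScheduler(num_train_timesteps=1000, solver_order=2, solver_type="bh2")
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    model_output = 0.1 * sample  # stand-in for model(sample, t)
    sample = scheduler.step(model_output, t, sample).prev_sample
print(sample.shape)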
from ...configuration_utils import PretrainedConfig from ...utils import logging A_ : str = logging.get_logger(__name__) A_ : Dict = { '''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''', '''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''', '''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''', '''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''', '''funnel-transformer/intermediate''': ( '''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json''' ), '''funnel-transformer/intermediate-base''': ( '''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json''' ), '''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''', '''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''', '''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''', '''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''', } class _lowercase ( UpperCAmelCase__ ): _UpperCAmelCase = '''funnel''' _UpperCAmelCase = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''n_head''', } def __init__( self : Tuple , __lowerCAmelCase : Tuple=3_0522 , __lowerCAmelCase : Optional[Any]=[4, 4, 4] , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Union[str, Any]=2 , __lowerCAmelCase : List[Any]=768 , __lowerCAmelCase : Dict=12 , __lowerCAmelCase : List[str]=64 , __lowerCAmelCase : Optional[int]=3072 , __lowerCAmelCase : str="gelu_new" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Optional[Any]=0.0 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[str]=1E-9 , __lowerCAmelCase : Any="mean" , __lowerCAmelCase : List[str]="relative_shift" , __lowerCAmelCase : str=True , __lowerCAmelCase : int=True , __lowerCAmelCase : Dict=True , **__lowerCAmelCase : str , ) -> int: """simple docstring""" a = vocab_size a = block_sizes a = [1] * len(__lowerCAmelCase ) if block_repeats is None else block_repeats assert len(__lowerCAmelCase ) == len( self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length." a = num_decoder_layers a = d_model a = n_head a = d_head a = d_inner a = hidden_act a = hidden_dropout a = attention_dropout a = activation_dropout a = initializer_range a = initializer_std a = layer_norm_eps assert pooling_type in [ "mean", "max", ], f"""Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported.""" a = pooling_type assert attention_type in [ "relative_shift", "factorized", ], f"""Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported.""" a = attention_type a = separate_cls a = truncate_seq a = pool_q_only super().__init__(**__lowerCAmelCase ) @property def A ( self : Tuple ) -> Any: """simple docstring""" return sum(self.block_sizes ) @num_hidden_layers.setter def A ( self : str , __lowerCAmelCase : Dict ) -> Dict: """simple docstring""" raise NotImplementedError( "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`." 
) @property def A ( self : List[Any] ) -> Optional[int]: """simple docstring""" return len(self.block_sizes ) @num_blocks.setter def A ( self : List[str] , __lowerCAmelCase : int ) -> List[Any]: """simple docstring""" raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`." )
712
import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _lowercase : def __init__( self : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int]=13 , __lowerCAmelCase : str=32 , __lowerCAmelCase : str=3 , __lowerCAmelCase : int=4 , __lowerCAmelCase : List[str]=[10, 20, 30, 40] , __lowerCAmelCase : Any=[2, 2, 3, 2] , __lowerCAmelCase : Any=True , __lowerCAmelCase : int=True , __lowerCAmelCase : str=37 , __lowerCAmelCase : List[Any]="gelu" , __lowerCAmelCase : int=10 , __lowerCAmelCase : str=0.0_2 , __lowerCAmelCase : int=["stage2", "stage3", "stage4"] , __lowerCAmelCase : List[str]=[2, 3, 4] , __lowerCAmelCase : str=None , ) -> Optional[Any]: """simple docstring""" a = parent a = batch_size a = image_size a = num_channels a = num_stages a = hidden_sizes a = depths a = is_training a = use_labels a = intermediate_size a = hidden_act a = num_labels a = initializer_range a = out_features a = out_indices a = scope def A ( self : Optional[Any] ) -> int: """simple docstring""" a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a = None if self.use_labels: a = ids_tensor([self.batch_size] , self.num_labels ) a = self.get_config() return config, pixel_values, labels def A ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" return ConvNextVaConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def A ( self : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict ) -> Optional[int]: """simple docstring""" a = ConvNextVaModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def A ( self : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] ) -> Dict: """simple docstring""" a = ConvNextVaForImageClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] ) -> int: """simple docstring""" a = 
ConvNextVaBackbone(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None a = None a = ConvNextVaBackbone(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def A ( self : Union[str, Any] ) -> Dict: """simple docstring""" a = self.prepare_config_and_inputs() a , a , a = config_and_inputs a = {"pixel_values": pixel_values} return config, inputs_dict def A ( self : Dict ) -> Optional[int]: """simple docstring""" a = self.prepare_config_and_inputs() a , a , a = config_and_inputs a = {"pixel_values": pixel_values, "labels": labels} return config, inputs_dict @require_torch class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ): _UpperCAmelCase = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) _UpperCAmelCase = ( {'''feature-extraction''': ConvNextVaModel, '''image-classification''': ConvNextVaForImageClassification} if is_torch_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False def A ( self : List[str] ) -> List[Any]: """simple docstring""" a = ConvNextVaModelTester(self ) a = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=37 ) def A ( self : Tuple ) -> Dict: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A ( self : List[Any] ) -> List[Any]: """simple docstring""" return @unittest.skip(reason="ConvNextV2 does not use inputs_embeds" ) def A ( self : List[Any] ) -> List[Any]: """simple docstring""" pass @unittest.skip(reason="ConvNextV2 does not support input and output embeddings" ) def A ( self : int ) -> List[Any]: """simple docstring""" pass @unittest.skip(reason="ConvNextV2 does not use feedforward chunking" ) def A ( self : Optional[int] ) -> Dict: """simple docstring""" pass def A ( self : List[str] ) -> List[str]: """simple docstring""" if not self.model_tester.is_training: return for model_class in self.all_model_classes: a , a = self.model_tester.prepare_config_and_inputs_with_labels() a = True if model_class.__name__ in [ *get_values(__lowerCAmelCase ), *get_values(__lowerCAmelCase ), ]: continue a = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() a = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , 
return_labels=__lowerCAmelCase ) a = model(**__lowerCAmelCase ).loss loss.backward() def A ( self : Optional[int] ) -> List[str]: """simple docstring""" if not self.model_tester.is_training: return for model_class in self.all_model_classes: a , a = self.model_tester.prepare_config_and_inputs_with_labels() a = False a = True if ( model_class.__name__ in [*get_values(__lowerCAmelCase ), *get_values(__lowerCAmelCase )] or not model_class.supports_gradient_checkpointing ): continue a = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.gradient_checkpointing_enable() model.train() a = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) a = model(**__lowerCAmelCase ).loss loss.backward() def A ( self : List[Any] ) -> Any: """simple docstring""" a , a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = model_class(__lowerCAmelCase ) a = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a = [*signature.parameters.keys()] a = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCAmelCase ) def A ( self : Dict ) -> Dict: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def A ( self : Tuple ) -> List[Any]: """simple docstring""" def check_hidden_states_output(__lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple ): a = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() with torch.no_grad(): a = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) ) a = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states a = self.model_tester.num_stages self.assertEqual(len(__lowerCAmelCase ) , expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) a , a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = True check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] a = True check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def A ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase ) @slow def A ( self : Tuple ) -> List[str]: """simple docstring""" for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a = ConvNextVaModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) def UpperCAmelCase__ ( ): '''simple docstring''' a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class _lowercase ( unittest.TestCase ): @cached_property def A ( self : Optional[int] ) -> str: """simple docstring""" return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None @slow def A ( self : List[str] ) -> Union[str, Any]: """simple docstring""" a = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(__lowerCAmelCase ) a = self.default_image_processor a = 
prepare_img() a = preprocessor(images=__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase ) # forward pass with torch.no_grad(): a = model(**__lowerCAmelCase ) # verify the logits a = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __lowerCAmelCase ) a = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1E-4 ) )
32
0
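A detail worth noting in the Funnel config above: `num_hidden_layers` is not stored but derived as `sum(block_sizes)`, and its setter raises so the two values can never drift apart. A stripped-down sketch of that read-only derived-attribute pattern (the class name is illustrative, not part of transformers):

from typing import List, Optional

class FunnelLikeConfig:
    def __init__(self, block_sizes: List[int], block_repeats: Optional[List[int]] = None):
        self.block_sizes = block_sizes
        # One repeat per block unless given explicitly, mirroring the config above.
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats

    @property
    def num_hidden_layers(self) -> int:
        # Always derived from block_sizes, so it can never go stale.
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError("This sketch derives `num_hidden_layers`; set `block_sizes` instead.")

config = FunnelLikeConfig(block_sizes=[4, 4, 4])
print(config.num_hidden_layers)  # 12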
# fmt: off
MORSE_CODE_DICT = {
    "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.",
    "G": "--.", "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..",
    "M": "--", "N": "-.", "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.",
    "S": "...", "T": "-", "U": "..-", "V": "...-", "W": ".--", "X": "-..-",
    "Y": "-.--", "Z": "--..", "1": ".----", "2": "..---", "3": "...--",
    "4": "....-", "5": ".....", "6": "-....", "7": "--...", "8": "---..",
    "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.", ":": "---...",
    ",": "--..--", ".": ".-.-.-", "'": ".----.", '"': ".-..-.", "?": "..--..",
    "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-", "(": "-.--.",
    ")": "-.--.-", "!": "-.-.--", " ": "/",
}  # Exclamation mark is not in ITU-R recommendation
# fmt: on

REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    """Translate a message into Morse code, one space between letters."""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """Translate Morse code back into plain text."""
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
713
import copy import os import cva import numpy as np from matplotlib import pyplot as plt class _lowercase : def __init__( self : List[str] ) -> List[str]: """simple docstring""" a = "" a = "" a = [] a = 0 a = 256 a = 0 a = 0 a = 0 a = 0 def A ( self : Optional[Any] , __lowerCAmelCase : Any ) -> int: """simple docstring""" a = cva.imread(__lowerCAmelCase , 0 ) a = copy.deepcopy(self.img ) a , a , a = plt.hist(self.img.ravel() , 256 , [0, 256] , label="x" ) a = np.sum(__lowerCAmelCase ) for i in range(len(__lowerCAmelCase ) ): a = x[i] / self.k self.sk += prk a = (self.L - 1) * self.sk if self.rem != 0: a = int(last % last ) a = int(last + 1 if self.rem >= 0.5 else last ) self.last_list.append(__lowerCAmelCase ) a = int(np.ma.count(self.img ) / self.img[1].size ) a = self.img[1].size for i in range(self.number_of_cols ): for j in range(self.number_of_rows ): a = self.img[j][i] if num != self.last_list[num]: a = self.last_list[num] cva.imwrite("output_data/output.jpg" , self.img ) def A ( self : Any ) -> int: """simple docstring""" plt.hist(self.img.ravel() , 256 , [0, 256] ) def A ( self : Any ) -> int: """simple docstring""" cva.imshow("Output-Image" , self.img ) cva.imshow("Input-Image" , self.original_image ) cva.waitKey(5000 ) cva.destroyAllWindows() if __name__ == "__main__": A_ : List[Any] = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''') A_ : int = ConstantStretch() stretcher.stretch(file_path) stretcher.plot_histogram() stretcher.show_image()
32
0
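The Morse table above round-trips cleanly because the reverse dict inverts it exactly. A reduced, self-contained version of that round trip with just the three symbols needed for the classic SOS sanity check:

MORSE = {"S": "...", "O": "---", " ": "/"}  # subset of the full table above
REVERSE = {value: key for key, value in MORSE.items()}

def encrypt(message):
    return " ".join(MORSE[char] for char in message.upper())

def decrypt(code):
    return "".join(REVERSE[token] for token in code.split())

assert encrypt("SOS") == "... --- ..."
assert decrypt("... --- ...") == "SOS"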
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
714
from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import ScoreSdeVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class _lowercase ( UpperCAmelCase__ ): _UpperCAmelCase = 42 _UpperCAmelCase = 42 def __init__( self : Optional[Any] , __lowerCAmelCase : UNetaDModel , __lowerCAmelCase : ScoreSdeVeScheduler ) -> str: """simple docstring""" super().__init__() self.register_modules(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase ) @torch.no_grad() def __call__( self : int , __lowerCAmelCase : int = 1 , __lowerCAmelCase : int = 2000 , __lowerCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowerCAmelCase : Optional[str] = "pil" , __lowerCAmelCase : bool = True , **__lowerCAmelCase : Any , ) -> Union[ImagePipelineOutput, Tuple]: """simple docstring""" a = self.unet.config.sample_size a = (batch_size, 3, img_size, img_size) a = self.unet a = randn_tensor(__lowerCAmelCase , generator=__lowerCAmelCase ) * self.scheduler.init_noise_sigma a = sample.to(self.device ) self.scheduler.set_timesteps(__lowerCAmelCase ) self.scheduler.set_sigmas(__lowerCAmelCase ) for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): a = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device ) # correction step for _ in range(self.scheduler.config.correct_steps ): a = self.unet(__lowerCAmelCase , __lowerCAmelCase ).sample a = self.scheduler.step_correct(__lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase ).prev_sample # prediction step a = model(__lowerCAmelCase , __lowerCAmelCase ).sample a = self.scheduler.step_pred(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase ) a , a = output.prev_sample, output.prev_sample_mean a = sample_mean.clamp(0 , 1 ) a = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": a = self.numpy_to_pil(__lowerCAmelCase ) if not return_dict: return (sample,) return ImagePipelineOutput(images=__lowerCAmelCase )
32
0
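The score-SDE pipeline above runs `correct_steps` corrector updates at each noise level before a single predictor step. A toy numpy illustration of that control flow on a scalar sample; the arithmetic here is placeholder decay-plus-noise, not the scheduler's actual update rules.

import numpy as np

rng = np.random.default_rng(0)
num_timesteps, correct_steps = 5, 2
sample = rng.normal()

for t in range(num_timesteps):
    # Corrector: several small stochastic refinements at the same noise level.
    for _ in range(correct_steps):
        sample = sample - 0.1 * sample + 0.01 * rng.normal()
    # Predictor: one step that moves the sample to the next noise level.
    sample = 0.9 * sample + 0.05 * rng.normal()

print(sample)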
# Function to print upper half of diamond (pyramid)
def floyd(n):
    """Print the upper half of the diamond: n rows of left-padded stars."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    """Print the lower half of the diamond: n rows of shrinking stars."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    """Print a full diamond of height 2 * n, or a message when n <= 0."""
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r'''| /\ | |- | |- |--| |\ /| |-''')
    print(r'''|/ \| |- |_ |_ |__| | \/ | |_''')
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
    print("Good Bye...")
715
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def get_week_day(year: int, month: int, day: int) -> str:
    """Return the week-day name of a Gregorian date via the doomsday algorithm."""
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        # A year is common (not leap) when it is not divisible by 4, or when it
        # is a century year not divisible by 400.
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
32
0
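A worked check of the doomsday routine above: for 2000-01-01 the century anchor is (5 * (20 % 4) + 2) % 7 = 2 (Tuesday), the year-within-century terms are all zero so the year's doomsday is also Tuesday, January's leap-year anchor is 4, and (2 + 1 - 4) % 7 = 6 maps to Saturday. The asserts below are usage examples that assume the restored function name `get_week_day` from the snippet above.

# Dates hand-checked against a calendar.
assert get_week_day(2000, 1, 1) == "Saturday"
assert get_week_day(1900, 1, 1) == "Monday"
assert get_week_day(2020, 10, 24) == "Saturday"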
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin, SchedulerOutput @dataclass class _lowercase ( UpperCAmelCase__ ): _UpperCAmelCase = 42 _UpperCAmelCase = 42 class _lowercase ( UpperCAmelCase__, UpperCAmelCase__ ): _UpperCAmelCase = 1 @register_to_config def __init__( self : List[Any] , __lowerCAmelCase : int = 2000 , __lowerCAmelCase : float = 0.1_5 , __lowerCAmelCase : float = 0.0_1 , __lowerCAmelCase : float = 1348.0 , __lowerCAmelCase : float = 1E-5 , __lowerCAmelCase : int = 1 , ) -> Optional[Any]: """simple docstring""" a = sigma_max # setable values a = None self.set_sigmas(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def A ( self : int , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : Optional[int] = None ) -> torch.FloatTensor: """simple docstring""" return sample def A ( self : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : float = None , __lowerCAmelCase : Union[str, torch.device] = None ) -> Optional[Any]: """simple docstring""" a = sampling_eps if sampling_eps is not None else self.config.sampling_eps a = torch.linspace(1 , __lowerCAmelCase , __lowerCAmelCase , device=__lowerCAmelCase ) def A ( self : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : float = None , __lowerCAmelCase : float = None , __lowerCAmelCase : float = None ) -> Union[str, Any]: """simple docstring""" a = sigma_min if sigma_min is not None else self.config.sigma_min a = sigma_max if sigma_max is not None else self.config.sigma_max a = sampling_eps if sampling_eps is not None else self.config.sampling_eps if self.timesteps is None: self.set_timesteps(__lowerCAmelCase , __lowerCAmelCase ) a = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps) a = torch.exp(torch.linspace(math.log(__lowerCAmelCase ) , math.log(__lowerCAmelCase ) , __lowerCAmelCase ) ) a = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] ) def A ( self : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any] ) -> str: """simple docstring""" return torch.where( timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , ) def A ( self : List[str] , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : int , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : Optional[torch.Generator] = None , __lowerCAmelCase : bool = True , ) -> Union[SdeVeOutput, Tuple]: """simple docstring""" if self.timesteps is None: raise ValueError( "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" ) a = timestep * torch.ones( sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0]) a = (timestep * (len(self.timesteps ) - 1)).long() # mps requires indices to be in the same device, so we use cpu as is the default with cuda a = timesteps.to(self.discrete_sigmas.device ) a = self.discrete_sigmas[timesteps].to(sample.device ) a = self.get_adjacent_sigma(__lowerCAmelCase , __lowerCAmelCase ).to(sample.device ) a = torch.zeros_like(__lowerCAmelCase ) a = (sigma**2 - adjacent_sigma**2) ** 0.5 # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x) # also equation 47 
shows the analog from SDE models to ancestral sampling methods a = diffusion.flatten() while len(diffusion.shape ) < len(sample.shape ): a = diffusion.unsqueeze(-1 ) a = drift - diffusion**2 * model_output # equation 6: sample noise for the diffusion term of a = randn_tensor( sample.shape , layout=sample.layout , generator=__lowerCAmelCase , device=sample.device , dtype=sample.dtype ) a = sample - drift # subtract because `dt` is a small negative timestep # TODO is the variable diffusion the correct scaling term for the noise? a = prev_sample_mean + diffusion * noise # add impact of diffusion field g if not return_dict: return (prev_sample, prev_sample_mean) return SdeVeOutput(prev_sample=__lowerCAmelCase , prev_sample_mean=__lowerCAmelCase ) def A ( self : Optional[Any] , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : Optional[torch.Generator] = None , __lowerCAmelCase : bool = True , ) -> Union[SchedulerOutput, Tuple]: """simple docstring""" if self.timesteps is None: raise ValueError( "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" ) # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z" # sample noise for correction a = randn_tensor(sample.shape , layout=sample.layout , generator=__lowerCAmelCase ).to(sample.device ) # compute step size from the model_output, the noise, and the snr a = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean() a = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean() a = (self.config.snr * noise_norm / grad_norm) ** 2 * 2 a = step_size * torch.ones(sample.shape[0] ).to(sample.device ) # self.repeat_scalar(step_size, sample.shape[0]) # compute corrected sample: model_output term and noise term a = step_size.flatten() while len(step_size.shape ) < len(sample.shape ): a = step_size.unsqueeze(-1 ) a = sample + step_size * model_output a = prev_sample_mean + ((step_size * 2) ** 0.5) * noise if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=__lowerCAmelCase ) def A ( self : List[str] , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : torch.FloatTensor , ) -> torch.FloatTensor: """simple docstring""" a = timesteps.to(original_samples.device ) a = self.discrete_sigmas.to(original_samples.device )[timesteps] a = ( noise * sigmas[:, None, None, None] if noise is not None else torch.randn_like(__lowerCAmelCase ) * sigmas[:, None, None, None] ) a = noise + original_samples return noisy_samples def __len__( self : List[Any] ) -> Optional[int]: """simple docstring""" return self.config.num_train_timesteps
716
import logging import os import sys from dataclasses import dataclass, field from importlib import import_module from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch import nn from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process A_ : int = logging.getLogger(__name__) @dataclass class _lowercase : _UpperCAmelCase = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) _UpperCAmelCase = field( default=UpperCAmelCase__, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) _UpperCAmelCase = field( default='''NER''', metadata={'''help''': '''Task type to fine tune in training (e.g. NER, POS, etc)'''} ) _UpperCAmelCase = field( default=UpperCAmelCase__, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) _UpperCAmelCase = field(default=UpperCAmelCase__, metadata={'''help''': '''Set this flag to use fast tokenization.'''} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. _UpperCAmelCase = field( default=UpperCAmelCase__, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, ) @dataclass class _lowercase : _UpperCAmelCase = field( metadata={'''help''': '''The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'''} ) _UpperCAmelCase = field( default=UpperCAmelCase__, metadata={'''help''': '''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'''}, ) _UpperCAmelCase = field( default=128, metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) }, ) _UpperCAmelCase = field( default=UpperCAmelCase__, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) def UpperCAmelCase__ ( ): '''simple docstring''' a = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. a , a , a = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: a , a , a = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use""" " --overwrite_output_dir to overcome." ) a = import_module("tasks" ) try: a = getattr(UpperCAmelCase__ , model_args.task_type ) a = token_classification_task_clazz() except AttributeError: raise ValueError( F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. 
""" F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("Training/evaluation parameters %s" , UpperCAmelCase__ ) # Set seed set_seed(training_args.seed ) # Prepare CONLL-2003 task a = token_classification_task.get_labels(data_args.labels ) a = dict(enumerate(UpperCAmelCase__ ) ) a = len(UpperCAmelCase__ ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. a = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCAmelCase__ , idalabel=UpperCAmelCase__ , labelaid={label: i for i, label in enumerate(UpperCAmelCase__ )} , cache_dir=model_args.cache_dir , ) a = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , ) a = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=UpperCAmelCase__ , cache_dir=model_args.cache_dir , ) # Get datasets a = ( TokenClassificationDataset( token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) a = ( TokenClassificationDataset( token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def align_predictions(UpperCAmelCase__ :np.ndarray , UpperCAmelCase__ :np.ndarray ) -> Tuple[List[int], List[int]]: a = np.argmax(UpperCAmelCase__ , axis=2 ) a , a = preds.shape a = [[] for _ in range(UpperCAmelCase__ )] a = [[] for _ in range(UpperCAmelCase__ )] for i in range(UpperCAmelCase__ ): for j in range(UpperCAmelCase__ ): if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index: out_label_list[i].append(label_map[label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) return preds_list, out_label_list def compute_metrics(UpperCAmelCase__ :EvalPrediction ) -> Dict: a , a = align_predictions(p.predictions , p.label_ids ) return { "accuracy_score": accuracy_score(UpperCAmelCase__ , UpperCAmelCase__ ), "precision": precision_score(UpperCAmelCase__ , UpperCAmelCase__ ), "recall": recall_score(UpperCAmelCase__ , UpperCAmelCase__ ), "f1": fa_score(UpperCAmelCase__ , UpperCAmelCase__ ), } # Data collator a 
= DataCollatorWithPadding(UpperCAmelCase__ , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer a = Trainer( model=UpperCAmelCase__ , args=UpperCAmelCase__ , train_dataset=UpperCAmelCase__ , eval_dataset=UpperCAmelCase__ , compute_metrics=UpperCAmelCase__ , data_collator=UpperCAmelCase__ , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_process_zero(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation a = {} if training_args.do_eval: logger.info("*** Evaluate ***" ) a = trainer.evaluate() a = os.path.join(training_args.output_dir , "eval_results.txt" ) if trainer.is_world_process_zero(): with open(UpperCAmelCase__ , "w" ) as writer: logger.info("***** Eval results *****" ) for key, value in result.items(): logger.info(" %s = %s" , UpperCAmelCase__ , UpperCAmelCase__ ) writer.write("%s = %s\n" % (key, value) ) results.update(UpperCAmelCase__ ) # Predict if training_args.do_predict: a = TokenClassificationDataset( token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , ) a , a , a = trainer.predict(UpperCAmelCase__ ) a , a = align_predictions(UpperCAmelCase__ , UpperCAmelCase__ ) a = os.path.join(training_args.output_dir , "test_results.txt" ) if trainer.is_world_process_zero(): with open(UpperCAmelCase__ , "w" ) as writer: for key, value in metrics.items(): logger.info(" %s = %s" , UpperCAmelCase__ , UpperCAmelCase__ ) writer.write("%s = %s\n" % (key, value) ) # Save predictions a = os.path.join(training_args.output_dir , "test_predictions.txt" ) if trainer.is_world_process_zero(): with open(UpperCAmelCase__ , "w" ) as writer: with open(os.path.join(data_args.data_dir , "test.txt" ) , "r" ) as f: token_classification_task.write_predictions_to_file(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) return results def UpperCAmelCase__ ( UpperCAmelCase__ :Tuple ): '''simple docstring''' main() if __name__ == "__main__": main()
32
0
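# --- Illustrative aside (not a dataset row): a minimal NumPy sketch of the
# Langevin corrector rule exercised by the scheduler code in the row above,
# step_size = 2 * (snr * ||z|| / ||score||) ** 2, followed by a nudge along
# the score and re-noising. All names here are illustrative assumptions.
import numpy as np


def corrector_step(sample, score, snr=0.16, rng=None):
    rng = np.random.default_rng() if rng is None else rng
    noise = rng.standard_normal(sample.shape)
    grad_norm = np.linalg.norm(score.reshape(score.shape[0], -1), axis=-1).mean()
    noise_norm = np.linalg.norm(noise.reshape(noise.shape[0], -1), axis=-1).mean()
    step_size = (snr * noise_norm / grad_norm) ** 2 * 2
    prev_sample_mean = sample + step_size * score
    return prev_sample_mean + (step_size * 2) ** 0.5 * noise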
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A_ : Tuple = logging.get_logger(__name__) A_ : str = { '''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''', '''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''', '''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''', '''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''', '''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''', '''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''', '''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''', '''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''', '''bert-large-uncased-whole-word-masking''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json''' ), '''bert-large-cased-whole-word-masking''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json''' ), '''bert-large-uncased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json''' ), '''bert-large-cased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json''' ), '''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''', '''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''', '''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''', '''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''', '''cl-tohoku/bert-base-japanese-whole-word-masking''': ( '''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json''' ), '''cl-tohoku/bert-base-japanese-char''': ( '''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json''' ), '''cl-tohoku/bert-base-japanese-char-whole-word-masking''': ( '''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json''' ), '''TurkuNLP/bert-base-finnish-cased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json''' ), '''TurkuNLP/bert-base-finnish-uncased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json''' ), '''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''', # See all BERT models at https://huggingface.co/models?filter=bert } class _lowercase ( UpperCAmelCase__ ): _UpperCAmelCase = '''bert''' def __init__( self : int , __lowerCAmelCase : Any=3_0522 , __lowerCAmelCase : List[Any]=768 , __lowerCAmelCase : int=12 , __lowerCAmelCase : List[str]=12 , __lowerCAmelCase : str=3072 , __lowerCAmelCase : str="gelu" , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : str=512 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase 
: Tuple=0.0_2 , __lowerCAmelCase : Any=1E-12 , __lowerCAmelCase : Any=0 , __lowerCAmelCase : Any="absolute" , __lowerCAmelCase : Dict=True , __lowerCAmelCase : str=None , **__lowerCAmelCase : str , ) -> Dict: """simple docstring""" super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase ) a = vocab_size a = hidden_size a = num_hidden_layers a = num_attention_heads a = hidden_act a = intermediate_size a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = type_vocab_size a = initializer_range a = layer_norm_eps a = position_embedding_type a = use_cache a = classifier_dropout class _lowercase ( UpperCAmelCase__ ): @property def A ( self : Any ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": a = {0: "batch", 1: "choice", 2: "sequence"} else: a = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis), ] )
717
from ...configuration_utils import PretrainedConfig
from ...utils import logging


A_ : str = logging.get_logger(__name__)

A_ : List[Any] = {
    '''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
    '''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
    '''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
    '''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}


class _lowercase ( UpperCAmelCase__ ):
    _UpperCAmelCase = '''rwkv'''
    _UpperCAmelCase = {'''max_position_embeddings''': '''context_length'''}

    def __init__( self : List[str] , __lowerCAmelCase : Union[str, Any]=5_0277 , __lowerCAmelCase : str=1024 , __lowerCAmelCase : Union[str, Any]=4096 , __lowerCAmelCase : Optional[int]=32 , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : List[Any]=1E-5 , __lowerCAmelCase : Union[str, Any]=0 , __lowerCAmelCase : Optional[Any]=0 , __lowerCAmelCase : Dict=6 , __lowerCAmelCase : int=False , __lowerCAmelCase : Tuple=True , **__lowerCAmelCase : List[str] , ) -> List[Any]:
        """simple docstring"""
        a = vocab_size
        a = context_length
        a = hidden_size
        a = num_hidden_layers
        a = attention_hidden_size if attention_hidden_size is not None else hidden_size
        a = intermediate_size if intermediate_size is not None else 4 * hidden_size
        a = layer_norm_epsilon
        a = rescale_every
        a = use_cache

        a = bos_token_id
        a = eos_token_id

        super().__init__(
            tie_word_embeddings=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
32
0
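# --- Illustrative aside (not a dataset row): the RwkvConfig in the row above
# derives two defaults in __init__; a tiny dataclass sketch of that fallback
# logic, with hypothetical names, looks like this.
from dataclasses import dataclass
from typing import Optional


@dataclass
class TinyRwkvConfig:
    hidden_size: int = 1024
    attention_hidden_size: Optional[int] = None
    intermediate_size: Optional[int] = None

    def __post_init__(self):
        # attention_hidden_size falls back to hidden_size, and
        # intermediate_size to 4 * hidden_size, as in the config above.
        if self.attention_hidden_size is None:
            self.attention_hidden_size = self.hidden_size
        if self.intermediate_size is None:
            self.intermediate_size = 4 * self.hidden_size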
A_ : Optional[int] = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''

A_ : List[Any] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
A_ : Optional[Any] = {
    '''{processor_class}''': '''FakeProcessorClass''',
    '''{model_class}''': '''FakeModelClass''',
    '''{object_class}''': '''FakeObjectClass''',
}
718
from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging A_ : List[str] = logging.get_logger(__name__) class _lowercase ( UpperCAmelCase__ ): _UpperCAmelCase = ['''audio_values''', '''audio_mask'''] def __init__( self : List[Any] , __lowerCAmelCase : Dict=2048 , __lowerCAmelCase : List[Any]=1 , __lowerCAmelCase : Dict=[16, 16] , __lowerCAmelCase : str=128 , __lowerCAmelCase : Optional[int]=4_4100 , __lowerCAmelCase : int=86 , __lowerCAmelCase : Optional[Any]=2048 , __lowerCAmelCase : str=0.0 , **__lowerCAmelCase : Optional[int] , ) -> Union[str, Any]: """simple docstring""" super().__init__( feature_size=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , padding_value=__lowerCAmelCase , **__lowerCAmelCase , ) a = spectrogram_length a = num_channels a = patch_size a = feature_size // self.patch_size[1] a = n_fft a = sampling_rate // hop_length_to_sampling_rate a = sampling_rate a = padding_value a = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__lowerCAmelCase , min_frequency=0.0 , max_frequency=2_2_0_5_0.0 , sampling_rate=__lowerCAmelCase , norm="slaney" , mel_scale="slaney" , ).T def A ( self : List[str] , __lowerCAmelCase : np.array ) -> np.ndarray: """simple docstring""" a = spectrogram( __lowerCAmelCase , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="dB" , db_range=8_0.0 , ) a = log_spec[:, :-1] a = log_spec - 2_0.0 a = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self : Union[str, Any] , __lowerCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , __lowerCAmelCase : Optional[bool] = True , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , **__lowerCAmelCase : Optional[int] , ) -> BatchFeature: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( "This feature extractor is set to support sampling rate" f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled""" f""" with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." 
) a = isinstance(__lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) a = is_batched_numpy or ( isinstance(__lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: a = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray ): a = np.asarray(__lowerCAmelCase , dtype=np.floataa ) elif isinstance(__lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): a = raw_speech.astype(np.floataa ) # always return batch if not is_batched: a = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis a = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , __lowerCAmelCase ): a = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask a = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: a = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] a = np.array(__lowerCAmelCase ).astype(np.floataa ) # convert into correct format for padding a = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch a = np.ones([len(__lowerCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) a = padded_audio_features * self.padding_value for i in range(len(__lowerCAmelCase ) ): a = audio_features[i] a = feature # return as BatchFeature if return_attention_mask: a = {"audio_values": padded_audio_features, "audio_mask": audio_mask} else: a = {"audio_values": padded_audio_features} a = BatchFeature(data=__lowerCAmelCase , tensor_type=__lowerCAmelCase ) return encoded_inputs
32
0
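# --- Illustrative aside (not a dataset row): the TVLT-style feature extractor
# above builds its audio attention mask from per-example frame counts; each
# example owns ceil(frames / time_patch) * freq_len patch slots, zero-padded
# up to the longest example in the batch. Shapes and names are assumptions.
from math import ceil

import numpy as np


def audio_attention_mask(frames_per_example, time_patch=16, freq_len=8):
    patch_lens = [ceil(n / time_patch) * freq_len for n in frames_per_example]
    max_len = max(patch_lens)
    return np.array(
        [[1] * n + [0] * (max_len - n) for n in patch_lens], dtype=np.float32
    )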
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

A_ : List[str] = (3, 9, -11, 0, 7, 5, 1, -1)
A_ : Optional[int] = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class _lowercase :
    _UpperCAmelCase = 42
    _UpperCAmelCase = 42


class _lowercase :
    def __init__( self : List[Any] , __lowerCAmelCase : Iterable[int] ) -> None:
        """simple docstring"""
        a = None
        for i in sorted(__lowerCAmelCase , reverse=__lowerCAmelCase ):
            a = Node(__lowerCAmelCase , self.head )

    def __iter__( self : Union[str, Any] ) -> Iterator[int]:
        """simple docstring"""
        a = self.head
        while node:
            yield node.data
            a = node.next_node

    def __len__( self : Tuple ) -> int:
        """simple docstring"""
        return sum(1 for _ in self )

    def __str__( self : Union[str, Any] ) -> str:
        """simple docstring"""
        return " -> ".join([str(__lowerCAmelCase ) for node in self] )


def UpperCAmelCase__ ( UpperCAmelCase__ :SortedLinkedList , UpperCAmelCase__ :SortedLinkedList ):
    '''simple docstring'''
    return SortedLinkedList(list(UpperCAmelCase__ ) + list(UpperCAmelCase__ ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    A_ : Optional[Any] = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
719
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class _lowercase : def __init__( self : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : int=10 , __lowerCAmelCase : Any=3 , __lowerCAmelCase : Optional[int]=32 * 4 , __lowerCAmelCase : Dict=32 * 6 , __lowerCAmelCase : str=4 , __lowerCAmelCase : Dict=32 , ) -> Any: """simple docstring""" a = parent a = batch_size a = is_training a = use_auxiliary_loss a = num_queries a = num_channels a = min_size a = max_size a = num_labels a = mask_feature_size def A ( self : Union[str, Any] ) -> Dict: """simple docstring""" a = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __lowerCAmelCase ) a = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowerCAmelCase ) a = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowerCAmelCase ) > 0.5 ).float() a = (torch.rand((self.batch_size, self.num_labels) , device=__lowerCAmelCase ) > 0.5).long() a = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def A ( self : str ) -> Any: """simple docstring""" return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def A ( self : Union[str, Any] ) -> Any: """simple docstring""" a , a , a , a , a = self.prepare_config_and_inputs() a = {"pixel_values": pixel_values, "pixel_mask": pixel_mask} return config, inputs_dict def A ( self : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Dict ) -> str: """simple docstring""" a = output.encoder_hidden_states a = output.pixel_decoder_hidden_states a = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowerCAmelCase ) , config.decoder_config.decoder_layers ) def A ( self : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str]=False ) -> Tuple: """simple docstring""" with torch.no_grad(): a = MaskFormerModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase ) a = model(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) # the correct shape of 
output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(__lowerCAmelCase , __lowerCAmelCase ) def A ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] ) -> Optional[int]: """simple docstring""" a = MaskFormerForInstanceSegmentation(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() def comm_check_on_output(__lowerCAmelCase : Tuple ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): a = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase ) a = model(__lowerCAmelCase ) comm_check_on_output(__lowerCAmelCase ) a = model( pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ) comm_check_on_output(__lowerCAmelCase ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ): _UpperCAmelCase = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () _UpperCAmelCase = ( {'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False def A ( self : List[str] ) -> List[Any]: """simple docstring""" a = MaskFormerModelTester(self ) a = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase ) def A ( self : Any ) -> List[str]: """simple docstring""" self.config_tester.run_common_tests() def A ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" a , a = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) def A ( self : int ) -> int: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowerCAmelCase ) @unittest.skip(reason="MaskFormer does not use inputs_embeds" ) def A ( self : List[Any] ) -> Optional[Any]: """simple docstring""" pass @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" ) def A ( self : str ) -> Union[str, Any]: """simple docstring""" pass @unittest.skip(reason="MaskFormer is not 
a generative model" ) def A ( self : Tuple ) -> Optional[Any]: """simple docstring""" pass @unittest.skip(reason="MaskFormer does not use token embeddings" ) def A ( self : Tuple ) -> Optional[Any]: """simple docstring""" pass @require_torch_multi_gpu @unittest.skip( reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def A ( self : Optional[int] ) -> List[str]: """simple docstring""" pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def A ( self : List[str] ) -> Any: """simple docstring""" pass def A ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" a , a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = model_class(__lowerCAmelCase ) a = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a = [*signature.parameters.keys()] a = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCAmelCase ) @slow def A ( self : Tuple ) -> List[Any]: """simple docstring""" for model_name in ["facebook/maskformer-swin-small-coco"]: a = MaskFormerModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) def A ( self : str ) -> Dict: """simple docstring""" a = (self.model_tester.min_size,) * 2 a = { "pixel_values": torch.randn((2, 3, *size) , device=__lowerCAmelCase ), "mask_labels": torch.randn((2, 10, *size) , device=__lowerCAmelCase ), "class_labels": torch.zeros(2 , 10 , device=__lowerCAmelCase ).long(), } a = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowerCAmelCase ) a = model(**__lowerCAmelCase ) self.assertTrue(outputs.loss is not None ) def A ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" a , a = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) def A ( self : List[str] ) -> Any: """simple docstring""" a , a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = model_class(__lowerCAmelCase ).to(__lowerCAmelCase ) a = model(**__lowerCAmelCase , output_attentions=__lowerCAmelCase ) self.assertTrue(outputs.attentions is not None ) def A ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss a = self.all_model_classes[1] a , a , a , a , a = self.model_tester.prepare_config_and_inputs() a = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() a = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ).loss loss.backward() def A ( self : List[str] ) -> Union[str, Any]: """simple docstring""" a = self.all_model_classes[1] a , a , a , a , a = self.model_tester.prepare_config_and_inputs() a = True a = True a = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() a = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ) a = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() a = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't a = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() a = outputs.attentions[0] attentions.retain_grad() 
outputs.loss.backward(retain_graph=__lowerCAmelCase ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) A_ : int = 1E-4 def UpperCAmelCase__ ( ): '''simple docstring''' a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_vision @slow class _lowercase ( unittest.TestCase ): @cached_property def A ( self : int ) -> Optional[int]: """simple docstring""" return ( MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" ) if is_vision_available() else None ) def A ( self : List[Any] ) -> Optional[Any]: """simple docstring""" a = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(__lowerCAmelCase ) a = self.default_image_processor a = prepare_img() a = image_processor(__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase ) a = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) ) with torch.no_grad(): a = model(**__lowerCAmelCase ) a = torch.tensor( [[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) a = torch.tensor( [[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) a = torch.tensor( [[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def A ( self : str ) -> Union[str, Any]: """simple docstring""" a = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" ) .to(__lowerCAmelCase ) .eval() ) a = self.default_image_processor a = prepare_img() a = image_processor(__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase ) a = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) ) with torch.no_grad(): a = model(**__lowerCAmelCase ) # masks_queries_logits a = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) a = [ [-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3], [-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5], [-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2], ] a = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) # class_queries_logits a = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) a = torch.tensor( [ [1.65_12E00, -5.25_72E00, -3.35_19E00], [3.61_69E-02, -5.90_25E00, -2.93_13E00], [1.07_66E-04, -7.76_30E00, 
-5.12_63E00], ] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def A ( self : List[Any] ) -> Any: """simple docstring""" a = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" ) .to(__lowerCAmelCase ) .eval() ) a = self.default_image_processor a = prepare_img() a = image_processor(__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase ) a = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) ) with torch.no_grad(): a = model(**__lowerCAmelCase ) # masks_queries_logits a = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) a = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]] a = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) # class_queries_logits a = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) a = torch.tensor( [[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def A ( self : int ) -> Any: """simple docstring""" a = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" ) .to(__lowerCAmelCase ) .eval() ) a = self.default_image_processor a = image_processor( [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="pt" , ) a = inputs["pixel_values"].to(__lowerCAmelCase ) a = [el.to(__lowerCAmelCase ) for el in inputs["mask_labels"]] a = [el.to(__lowerCAmelCase ) for el in inputs["class_labels"]] with torch.no_grad(): a = model(**__lowerCAmelCase ) self.assertTrue(outputs.loss is not None )
32
0
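# --- Illustrative aside (not a dataset row): merge_lists in the linked-list
# row above concatenates both lists and re-sorts inside the constructor; an
# O(n + m) alternative for two already-sorted iterables uses heapq.merge.
import heapq


def merge_sorted(xs, ys):
    return list(heapq.merge(sorted(xs), sorted(ys)))


assert merge_sorted([3, 9, -11], [4, 6, 2]) == sorted([3, 9, -11, 4, 6, 2])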
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce


def UpperCAmelCase__ ( UpperCAmelCase__ :str ):
    '''simple docstring'''
    return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )


def UpperCAmelCase__ ( UpperCAmelCase__ :Optional[int] ):
    '''simple docstring'''
    a = create_tensor(UpperCAmelCase__ )
    a = gather(UpperCAmelCase__ )
    assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )


def UpperCAmelCase__ ( UpperCAmelCase__ :List[str] ):
    '''simple docstring'''
    a = [state.process_index]
    a = gather_object(UpperCAmelCase__ )
    assert len(UpperCAmelCase__ ) == state.num_processes, F"""{gathered_obj}, {len(UpperCAmelCase__ )} != {state.num_processes}"""
    assert gathered_obj == list(range(state.num_processes ) ), F"""{gathered_obj} != {list(range(state.num_processes ) )}"""


def UpperCAmelCase__ ( UpperCAmelCase__ :str ):
    '''simple docstring'''
    a = create_tensor(UpperCAmelCase__ )
    a = broadcast(UpperCAmelCase__ )
    assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
    assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )


def UpperCAmelCase__ ( UpperCAmelCase__ :List[Any] ):
    '''simple docstring'''
    if state.is_main_process:
        a = torch.arange(state.num_processes + 1 ).to(state.device )
    else:
        a = torch.arange(state.num_processes ).to(state.device )
    a = pad_across_processes(UpperCAmelCase__ )
    assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]


def UpperCAmelCase__ ( UpperCAmelCase__ :List[Any] ):
    '''simple docstring'''
    if state.num_processes != 2:
        return
    a = create_tensor(UpperCAmelCase__ )
    a = reduce(UpperCAmelCase__ , "sum" )
    a = torch.tensor([4.0, 6] ).to(state.device )
    assert torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ ), F"""{reduced_tensor} != {truth_tensor}"""


def UpperCAmelCase__ ( UpperCAmelCase__ :Optional[Any] ):
    '''simple docstring'''
    if state.num_processes != 2:
        return
    a = create_tensor(UpperCAmelCase__ )
    a = reduce(UpperCAmelCase__ , "mean" )
    a = torch.tensor([2.0, 3] ).to(state.device )
    assert torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ ), F"""{reduced_tensor} != {truth_tensor}"""


def UpperCAmelCase__ ( UpperCAmelCase__ :Any ):
    '''simple docstring'''
    main()


def UpperCAmelCase__ ( ):
    '''simple docstring'''
    a = PartialState()
    state.print(F"""State: {state}""" )

    state.print("testing gather" )
    test_gather(UpperCAmelCase__ )

    state.print("testing gather_object" )
    test_gather_object(UpperCAmelCase__ )

    state.print("testing broadcast" )
    test_broadcast(UpperCAmelCase__ )

    state.print("testing pad_across_processes" )
    test_pad_across_processes(UpperCAmelCase__ )

    state.print("testing reduce_sum" )
    test_reduce_sum(UpperCAmelCase__ )

    state.print("testing reduce_mean" )
    test_reduce_mean(UpperCAmelCase__ )


if __name__ == "__main__":
    main()
720
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class _lowercase ( unittest.TestCase ):
    def A ( self : Union[str, Any] ) -> int:
        """simple docstring"""
        a = [[1, 2, 4], [1, 2, 3, 4]]
        a = DisjunctiveConstraint(__lowerCAmelCase )
        self.assertTrue(isinstance(dc.token_ids , __lowerCAmelCase ) )

        with self.assertRaises(__lowerCAmelCase ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )

        with self.assertRaises(__lowerCAmelCase ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )

    def A ( self : Tuple ) -> Dict:
        """simple docstring"""
        a = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(__lowerCAmelCase ):
            DisjunctiveConstraint(__lowerCAmelCase )  # fails here

    def A ( self : int ) -> Any:
        """simple docstring"""
        a = [[1, 2, 3], [1, 2, 4]]
        a = DisjunctiveConstraint(__lowerCAmelCase )

        a , a , a = dc.update(1 )
        a = stepped is True and completed is False and reset is False
        self.assertTrue(__lowerCAmelCase )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )

        a , a , a = dc.update(2 )
        a = stepped is True and completed is False and reset is False
        self.assertTrue(__lowerCAmelCase )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )

        a , a , a = dc.update(3 )
        a = stepped is True and completed is True and reset is False
        self.assertTrue(__lowerCAmelCase )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )

    def A ( self : List[Any] ) -> List[Any]:
        """simple docstring"""
        a = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        a = DisjunctiveConstraint(__lowerCAmelCase )

        a , a , a = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )

        a , a , a = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )

        a , a , a = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )

        a , a , a = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )

        dc.reset()

        a , a , a = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )

        a , a , a = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )

        a , a , a = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
32
0
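# --- Illustrative aside (not a dataset row, and not the transformers API): a
# toy re-implementation of the behaviour the DisjunctiveConstraint test above
# asserts — advance a shared prefix token by token, completing once any
# candidate sequence is fully matched, resetting on a mismatch.
class MiniDisjunctive:
    def __init__(self, seqs):
        self.seqs = [list(s) for s in seqs]
        self.prefix = []

    def update(self, token):
        cand = self.prefix + [token]
        if any(s[: len(cand)] == cand for s in self.seqs):
            self.prefix = cand
            return True, any(s == cand for s in self.seqs)  # stepped, completed
        self.prefix = []
        return False, False


dc = MiniDisjunctive([[1, 2, 3], [1, 2, 4]])
assert dc.update(1) == (True, False)
assert dc.update(2) == (True, False)
assert dc.update(3) == (True, True)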
import os

import pytest

from transformers.dynamic_module_utils import get_imports


A_ : Tuple = '''
import os
'''

A_ : str = '''
def foo():
    import os
    return False
'''

A_ : Optional[int] = '''
def foo():
    def bar():
        if True:
            import os
        return False
    return bar()
'''

A_ : Optional[int] = '''
import os

try:
    import bar
except ImportError:
    raise ValueError()
'''

A_ : Union[str, Any] = '''
import os

def foo():
    try:
        import bar
    except ImportError:
        raise ValueError()
'''

A_ : str = '''
import os

try:
    import bar
except (ImportError, AttributeError):
    raise ValueError()
'''

A_ : Tuple = '''
import os

try:
    import bar
except ImportError as e:
    raise ValueError()
'''

A_ : Optional[int] = '''
import os

try:
    import bar
except:
    raise ValueError()
'''

A_ : Union[str, Any] = '''
import os

try:
    import bar
    import baz
except ImportError:
    raise ValueError()
'''

A_ : Union[str, Any] = '''
import os

try:
    import bar
    import baz
except ImportError:
    x = 1
    raise ValueError()
'''

A_ : Optional[int] = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case" , UpperCAmelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__ :Dict , UpperCAmelCase__ :Tuple ):
    '''simple docstring'''
    a = os.path.join(UpperCAmelCase__ , "test_file.py" )
    with open(UpperCAmelCase__ , "w" ) as _tmp_file:
        _tmp_file.write(UpperCAmelCase__ )

    a = get_imports(UpperCAmelCase__ )
    assert parsed_imports == ["os"]
721
from __future__ import annotations


def UpperCAmelCase__ ( UpperCAmelCase__ :int ):
    '''simple docstring'''
    a = str(UpperCAmelCase__ )
    return len(UpperCAmelCase__ ) == 9 and set(UpperCAmelCase__ ) == set("123456789" )


def UpperCAmelCase__ ( ):
    '''simple docstring'''
    for base_num in range(99_99 , 49_99 , -1 ):
        a = 10_00_02 * base_num
        if is_9_pandigital(UpperCAmelCase__ ):
            return candidate

    for base_num in range(3_33 , 99 , -1 ):
        a = 1_00_20_03 * base_num
        if is_9_pandigital(UpperCAmelCase__ ):
            return candidate
    return None


if __name__ == "__main__":
    print(F"""{solution() = }""")
32
0
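# --- Illustrative aside (not a dataset row, and only a rough stand-in for the
# real get_imports helper): collect top-level module names from an AST while
# skipping try-guarded imports — which is why every fixture in the row above
# is expected to yield only ["os"].
import ast


def list_unguarded_imports(source):
    mods = set()

    def visit(node):
        for child in ast.iter_child_nodes(node):
            if isinstance(child, ast.Try):
                continue  # optional-dependency pattern: ignore guarded imports
            if isinstance(child, ast.Import):
                mods.update(alias.name.split(".")[0] for alias in child.names)
            elif isinstance(child, ast.ImportFrom) and child.module:
                mods.add(child.module.split(".")[0])
            visit(child)

    visit(ast.parse(source))
    return sorted(mods)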
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing the experiment tracking capability, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## A_ : Optional[Any] = 16 A_ : str = 32 def UpperCAmelCase__ ( UpperCAmelCase__ :Accelerator , UpperCAmelCase__ :int = 16 ): '''simple docstring''' a = AutoTokenizer.from_pretrained("bert-base-cased" ) a = load_dataset("glue" , "mrpc" ) def tokenize_function(UpperCAmelCase__ :str ): # max_length=None => use the model max length (it's actually the default) a = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): a = datasets.map( UpperCAmelCase__ , batched=UpperCAmelCase__ , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library a = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(UpperCAmelCase__ :List[str] ): # On TPU it's best to pad everything to the same length or training will be very slow. a = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": a = 16 elif accelerator.mixed_precision != "no": a = 8 else: a = None return tokenizer.pad( UpperCAmelCase__ , padding="longest" , max_length=UpperCAmelCase__ , pad_to_multiple_of=UpperCAmelCase__ , return_tensors="pt" , ) # Instantiate dataloaders. 
a = DataLoader( tokenized_datasets["train"] , shuffle=UpperCAmelCase__ , collate_fn=UpperCAmelCase__ , batch_size=UpperCAmelCase__ ) a = DataLoader( tokenized_datasets["validation"] , shuffle=UpperCAmelCase__ , collate_fn=UpperCAmelCase__ , batch_size=UpperCAmelCase__ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1": from accelerate.test_utils.training import mocked_dataloaders A_ : str = mocked_dataloaders # noqa: F811 def UpperCAmelCase__ ( UpperCAmelCase__ :Tuple , UpperCAmelCase__ :int ): '''simple docstring''' if os.environ.get("TESTING_MOCKED_DATALOADERS" , UpperCAmelCase__ ) == "1": a = 2 # Initialize Accelerator # New Code # # We pass in "all" to `log_with` to grab all available trackers in the environment # Note: If using a custom `Tracker` class, should be passed in here such as: # >>> log_with = ["all", MyCustomTrackerClassInstance()] if args.with_tracking: a = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir ) else: a = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs a = config["lr"] a = int(config["num_epochs"] ) a = int(config["seed"] ) a = int(config["batch_size"] ) set_seed(UpperCAmelCase__ ) a , a = get_dataloaders(UpperCAmelCase__ , UpperCAmelCase__ ) a = evaluate.load("glue" , "mrpc" ) # If the batch size is too big we use gradient accumulation a = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: a = batch_size // MAX_GPU_BATCH_SIZE a = MAX_GPU_BATCH_SIZE # Instantiate the model (we build the model here so that the seed also control new weights initialization) a = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=UpperCAmelCase__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). a = model.to(accelerator.device ) # Instantiate optimizer a = AdamW(params=model.parameters() , lr=UpperCAmelCase__ ) # Instantiate scheduler a = get_linear_schedule_with_warmup( optimizer=UpperCAmelCase__ , num_warmup_steps=1_00 , num_training_steps=(len(UpperCAmelCase__ ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. a , a , a , a , a = accelerator.prepare( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) # New Code # # We need to initialize the trackers we use. Overall configurations can also be stored if args.with_tracking: a = os.path.split(UpperCAmelCase__ )[-1].split("." )[0] accelerator.init_trackers(UpperCAmelCase__ , UpperCAmelCase__ ) # Now we train the model for epoch in range(UpperCAmelCase__ ): model.train() # New Code # # For our tracking example, we will log the total loss of each epoch if args.with_tracking: a = 0 for step, batch in enumerate(UpperCAmelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) a = model(**UpperCAmelCase__ ) a = outputs.loss # New Code # if args.with_tracking: total_loss += loss.detach().float() a = loss / gradient_accumulation_steps accelerator.backward(UpperCAmelCase__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(UpperCAmelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True` (the default). batch.to(accelerator.device ) with torch.no_grad(): a = model(**UpperCAmelCase__ ) a = outputs.logits.argmax(dim=-1 ) a , a = accelerator.gather_for_metrics((predictions, batch["labels"]) ) metric.add_batch( predictions=UpperCAmelCase__ , references=UpperCAmelCase__ , ) a = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" , UpperCAmelCase__ ) # New Code # # To actually log, we call `Accelerator.log` # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int` if args.with_tracking: accelerator.log( { "accuracy": eval_metric["accuracy"], "f1": eval_metric["f1"], "train_loss": total_loss.item() / len(UpperCAmelCase__ ), "epoch": epoch, } , step=UpperCAmelCase__ , ) # New Code # # When a run is finished, you should call `accelerator.end_training()` # to close all of the open trackers if args.with_tracking: accelerator.end_training() def UpperCAmelCase__ ( ): '''simple docstring''' a = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" , type=UpperCAmelCase__ , default=UpperCAmelCase__ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) parser.add_argument( "--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , ) parser.add_argument( "--project_dir" , type=UpperCAmelCase__ , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , ) a = parser.parse_args() a = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(UpperCAmelCase__ , UpperCAmelCase__ ) if __name__ == "__main__": main()
700
import os
import tempfile
import unittest

from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property


@unittest.skipUnless(os.path.exists(UpperCAmelCase__ ), '''Tatoeba directory does not exist.''' )
class _lowercase ( unittest.TestCase ):
    @cached_property
    def A ( self : List[str] ) -> int:
        """simple docstring"""
        a = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=__lowerCAmelCase )

    @slow
    def A ( self : Optional[int] ) -> List[str]:
        """simple docstring"""
        self.resolver.convert_models(["heb-eng"] )

    @slow
    def A ( self : Dict ) -> Any:
        """simple docstring"""
        a , a = self.resolver.write_model_card("opus-mt-he-en" , dry_run=__lowerCAmelCase )
        assert mmeta["long_pair"] == "heb-eng"
32
0
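# --- Illustrative aside (not a dataset row): the training loop in the row
# above scales the loss by the gradient-accumulation factor and only steps the
# optimizer every `accum` micro-batches (note it steps when step % accum == 0,
# i.e. including step 0, mirroring the script). Names are illustrative.
import torch
import torch.nn.functional as F


def train_epoch(model, optimizer, batches, accum=2):
    model.train()
    for step, (x, y) in enumerate(batches):
        loss = F.mse_loss(model(x), y) / accum
        loss.backward()
        if step % accum == 0:
            optimizer.step()
            optimizer.zero_grad()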
import copy from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging A_ : Optional[Any] = logging.get_logger(__name__) class _lowercase ( UpperCAmelCase__ ): _UpperCAmelCase = ['''input_features'''] def __init__( self : int , __lowerCAmelCase : Dict=80 , __lowerCAmelCase : int=1_6000 , __lowerCAmelCase : Optional[Any]=160 , __lowerCAmelCase : Union[str, Any]=30 , __lowerCAmelCase : Union[str, Any]=400 , __lowerCAmelCase : Optional[int]=0.0 , __lowerCAmelCase : Union[str, Any]=False , **__lowerCAmelCase : Union[str, Any] , ) -> str: """simple docstring""" super().__init__( feature_size=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , padding_value=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , **__lowerCAmelCase , ) a = n_fft a = hop_length a = chunk_length a = chunk_length * sampling_rate a = self.n_samples // hop_length a = sampling_rate a = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__lowerCAmelCase , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=__lowerCAmelCase , norm="slaney" , mel_scale="slaney" , ) def A ( self : Optional[Any] , __lowerCAmelCase : np.array ) -> np.ndarray: """simple docstring""" a = spectrogram( __lowerCAmelCase , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , ) a = log_spec[:, :-1] a = np.maximum(__lowerCAmelCase , log_spec.max() - 8.0 ) a = (log_spec + 4.0) / 4.0 return log_spec @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def A ( __lowerCAmelCase : List[np.ndarray] , __lowerCAmelCase : List[np.ndarray] , __lowerCAmelCase : float = 0.0 ) -> List[np.ndarray]: """simple docstring""" if attention_mask is not None: a = np.array(__lowerCAmelCase , np.intaa ) a = [] for vector, length in zip(__lowerCAmelCase , attention_mask.sum(-1 ) ): a = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 ) if length < normed_slice.shape[0]: a = padding_value normed_input_values.append(__lowerCAmelCase ) else: a = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values] return normed_input_values def __call__( self : str , __lowerCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __lowerCAmelCase : bool = True , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[str] = "max_length" , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[bool] = None , **__lowerCAmelCase : str , ) -> BatchFeature: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a""" f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input""" f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. 
" "Failing to do so can result in silent errors that might be hard to debug." ) a = isinstance(__lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) a = is_batched_numpy or ( isinstance(__lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: a = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray ): a = np.asarray(__lowerCAmelCase , dtype=np.floataa ) elif isinstance(__lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): a = raw_speech.astype(np.floataa ) # always return batch if not is_batched: a = [np.asarray([raw_speech] ).T] a = BatchFeature({"input_features": raw_speech} ) # convert into correct format for padding a = self.pad( __lowerCAmelCase , padding=__lowerCAmelCase , max_length=max_length if max_length else self.n_samples , truncation=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_attention_mask=return_attention_mask or do_normalize , ) # zero-mean and unit-variance normalization if do_normalize: a = self.zero_mean_unit_var_norm( padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , ) a = np.stack(padded_inputs["input_features"] , axis=0 ) # make sure list is in array format a = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 ) a = [self._np_extract_fbank_features(__lowerCAmelCase ) for waveform in input_features[0]] if isinstance(input_features[0] , __lowerCAmelCase ): a = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for feature in input_features] else: a = input_features if return_attention_mask: # rescale from sample (48000) to feature (3000) a = padded_inputs["attention_mask"][:, :: self.hop_length] if return_tensors is not None: a = padded_inputs.convert_to_tensors(__lowerCAmelCase ) return padded_inputs def A ( self : str ) -> Dict[str, Any]: """simple docstring""" a = copy.deepcopy(self.__dict__ ) a = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] return output
701
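As a minimal sketch of the pipeline the extractor above implements, the snippet below recomputes an 80-bin log-mel spectrogram for a synthetic tone with the same audio_utils helpers and the same clamping constants; the tone itself and all shapes are illustrative assumptions.

import numpy as np

from transformers.audio_utils import mel_filter_bank, spectrogram, window_function

# One second of a 440 Hz tone at the 16 kHz rate the extractor defaults to.
sampling_rate, n_fft, hop_length = 16000, 400, 160
waveform = np.sin(2 * np.pi * 440 * np.arange(sampling_rate) / sampling_rate).astype(np.float32)

filters = mel_filter_bank(
    num_frequency_bins=1 + n_fft // 2, num_mel_filters=80,
    min_frequency=0.0, max_frequency=8000.0,
    sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney",
)
log_spec = spectrogram(
    waveform, window_function(n_fft, "hann"),
    frame_length=n_fft, hop_length=hop_length,
    power=2.0, mel_filters=filters, log_mel="log10",
)
log_spec = log_spec[:, :-1]                           # drop the trailing frame, as above
log_spec = np.maximum(log_spec, log_spec.max() - 8.0) # clamp dynamic range to 8 dB decades
log_spec = (log_spec + 4.0) / 4.0                     # rescale roughly into [-1, 1]
print(log_spec.shape)                                 # (80, n_frames)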
from ...configuration_utils import PretrainedConfig from ...utils import logging A_ : Any = logging.get_logger(__name__) A_ : Optional[int] = { '''SCUT-DLVCLab/lilt-roberta-en-base''': ( '''https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json''' ), } class _lowercase ( UpperCAmelCase__ ): _UpperCAmelCase = '''lilt''' def __init__( self : Union[str, Any] , __lowerCAmelCase : Optional[Any]=3_0522 , __lowerCAmelCase : str=768 , __lowerCAmelCase : Any=12 , __lowerCAmelCase : Optional[Any]=12 , __lowerCAmelCase : List[Any]=3072 , __lowerCAmelCase : Dict="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[str]=512 , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : int=0.0_2 , __lowerCAmelCase : Union[str, Any]=1E-12 , __lowerCAmelCase : Tuple=0 , __lowerCAmelCase : List[Any]="absolute" , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : Dict=1024 , **__lowerCAmelCase : Dict , ) -> int: """simple docstring""" super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase ) a = vocab_size a = hidden_size a = num_hidden_layers a = num_attention_heads a = hidden_act a = intermediate_size a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = type_vocab_size a = initializer_range a = layer_norm_eps a = position_embedding_type a = classifier_dropout a = channel_shrink_ratio a = max_ad_position_embeddings
32
0
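Assuming the public class corresponding to the config above is transformers.LiltConfig (the checkpoint names in the archive map suggest so), a minimal instantiation looks like this:

from transformers import LiltConfig

# Values mirror the defaults in the __init__ above; overriding two fields for illustration.
config = LiltConfig(hidden_size=768, channel_shrink_ratio=4, max_2d_position_embeddings=1024)
print(config.model_type)            # "lilt"
print(config.channel_shrink_ratio)  # 4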
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging A_ : Tuple = logging.get_logger(__name__) A_ : Any = { '''google/pix2struct-textcaps-base''': ( '''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json''' ), } class _lowercase ( UpperCAmelCase__ ): _UpperCAmelCase = '''pix2struct_text_model''' _UpperCAmelCase = ['''past_key_values'''] _UpperCAmelCase = { '''hidden_size''': '''hidden_size''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self : Optional[int] , __lowerCAmelCase : int=5_0244 , __lowerCAmelCase : str=768 , __lowerCAmelCase : List[Any]=64 , __lowerCAmelCase : Union[str, Any]=2048 , __lowerCAmelCase : Any=12 , __lowerCAmelCase : Any=12 , __lowerCAmelCase : Optional[int]=32 , __lowerCAmelCase : str=128 , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : int=1E-6 , __lowerCAmelCase : str=1.0 , __lowerCAmelCase : Optional[int]="gelu_new" , __lowerCAmelCase : List[str]=0 , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Optional[int]=0 , __lowerCAmelCase : List[Any]=1 , __lowerCAmelCase : Dict=False , __lowerCAmelCase : int=True , **__lowerCAmelCase : List[str] , ) -> Any: """simple docstring""" a = vocab_size a = hidden_size a = d_kv a = d_ff a = num_layers a = num_heads a = relative_attention_num_buckets a = relative_attention_max_distance a = dropout_rate a = layer_norm_epsilon a = initializer_factor a = use_cache a = eos_token_id a = decoder_start_token_id # for backwards compatibility a = dense_act_fn super().__init__( pad_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , decoder_start_token_id=__lowerCAmelCase , tie_word_embeddings=__lowerCAmelCase , is_decoder=__lowerCAmelCase , **__lowerCAmelCase , ) @classmethod def A ( cls : Dict , __lowerCAmelCase : Union[str, os.PathLike] , **__lowerCAmelCase : Dict ) -> "PretrainedConfig": """simple docstring""" cls._set_token_in_kwargs(__lowerCAmelCase ) a , a = cls.get_config_dict(__lowerCAmelCase , **__lowerCAmelCase ) # get the text config dict if we are loading from Pix2StructConfig if config_dict.get("model_type" ) == "pix2struct": a = config_dict["text_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(__lowerCAmelCase , **__lowerCAmelCase ) class _lowercase ( UpperCAmelCase__ ): _UpperCAmelCase = '''pix2struct_vision_model''' def __init__( self : List[Any] , __lowerCAmelCase : List[Any]=768 , __lowerCAmelCase : str=768 , __lowerCAmelCase : Optional[int]=2048 , __lowerCAmelCase : int=64 , __lowerCAmelCase : Any=12 , __lowerCAmelCase : str=12 , __lowerCAmelCase : Optional[Any]="gelu_new" , __lowerCAmelCase : Any=1E-6 , __lowerCAmelCase : Optional[Any]=0.0 , __lowerCAmelCase : Tuple=0.0 , __lowerCAmelCase : int=1E-10 , __lowerCAmelCase : int=1.0 , __lowerCAmelCase : Optional[Any]=4096 , __lowerCAmelCase : str=32 , __lowerCAmelCase : Any=128 , **__lowerCAmelCase : Optional[Any] , ) -> Tuple: """simple docstring""" super().__init__(**__lowerCAmelCase ) a = hidden_size a = patch_embed_hidden_size a = d_ff a = dropout_rate a = num_hidden_layers a = num_attention_heads a = initializer_range a = initializer_factor a = attention_dropout a = layer_norm_eps a = dense_act_fn a = seq_len a = relative_attention_num_buckets a = relative_attention_max_distance a = d_kv @classmethod def A ( cls : List[str] , __lowerCAmelCase : Union[str, os.PathLike] , **__lowerCAmelCase : str ) -> "PretrainedConfig": """simple docstring""" cls._set_token_in_kwargs(__lowerCAmelCase ) a , a = cls.get_config_dict(__lowerCAmelCase , **__lowerCAmelCase ) # get the vision config dict if we are loading from Pix2StructConfig if config_dict.get("model_type" ) == "pix2struct": a = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(__lowerCAmelCase , **__lowerCAmelCase ) class _lowercase ( UpperCAmelCase__ ): _UpperCAmelCase = '''pix2struct''' _UpperCAmelCase = True def __init__( self : Optional[Any] , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : Optional[Any]=1.0 , __lowerCAmelCase : str=0.0_2 , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Dict=False , __lowerCAmelCase : Tuple=True , **__lowerCAmelCase : Any , ) -> List[Any]: """simple docstring""" super().__init__(tie_word_embeddings=__lowerCAmelCase , is_encoder_decoder=__lowerCAmelCase , **__lowerCAmelCase ) if text_config is None: a = {} logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values." ) if vision_config is None: a = {} logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values." 
) a = PixaStructTextConfig(**__lowerCAmelCase ) a = PixaStructVisionConfig(**__lowerCAmelCase ) a = self.text_config.decoder_start_token_id a = self.text_config.pad_token_id a = self.text_config.eos_token_id a = initializer_factor a = initializer_range a = self.initializer_range a = self.initializer_range a = is_vqa @classmethod def A ( cls : Optional[Any] , __lowerCAmelCase : PixaStructTextConfig , __lowerCAmelCase : PixaStructVisionConfig , **__lowerCAmelCase : List[Any] ) -> Optional[Any]: """simple docstring""" return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__lowerCAmelCase ) def A ( self : Optional[int] ) -> Any: """simple docstring""" a = copy.deepcopy(self.__dict__ ) a = self.text_config.to_dict() a = self.vision_config.to_dict() a = self.__class__.model_type return output
702
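A hedged sketch of composing the two sub-configs above into the composite config; the tiny layer counts are arbitrary, and from_text_vision_configs is the classmethod defined at the end of the composite class above.

from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig

text_config = Pix2StructTextConfig(num_layers=2, num_heads=2, hidden_size=64, d_kv=16, d_ff=128)
vision_config = Pix2StructVisionConfig(
    num_hidden_layers=2, num_attention_heads=2, hidden_size=64, d_kv=16, d_ff=128
)

# The composite config copies decoder_start/pad/eos token ids from the text config.
config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config, is_vqa=False)
print(config.text_config.num_layers, config.vision_config.num_hidden_layers)  # 2 2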
import argparse from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta from transformers.utils import logging logging.set_verbosity_info() def UpperCAmelCase__ ( UpperCAmelCase__ :Optional[int] , UpperCAmelCase__ :List[str] , UpperCAmelCase__ :Any ): '''simple docstring''' a = TaConfig.from_json_file(UpperCAmelCase__ ) print(F"""Building PyTorch model from configuration: {config}""" ) a = TaForConditionalGeneration(UpperCAmelCase__ ) # Load weights from tf checkpoint load_tf_weights_in_ta(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) # Save pytorch-model print(F"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(UpperCAmelCase__ ) if __name__ == "__main__": A_ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) A_ : Tuple = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
32
0
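The converter above is meant to be driven from the command line; a Python-side equivalent, with hypothetical local paths standing in for the three required arguments, would look roughly like:

from transformers import T5Config, T5ForConditionalGeneration

config = T5Config.from_json_file("t5_config.json")  # hypothetical architecture file
model = T5ForConditionalGeneration(config)          # randomly initialised weights
# load_tf_weights_in_t5(model, config, "tf_checkpoint/model.ckpt")  # fill in TF weights
model.save_pretrained("pytorch_dump/")              # writes model weights + config.json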
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available A_ : Optional[int] = { '''configuration_mobilenet_v2''': [ '''MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileNetV2Config''', '''MobileNetV2OnnxConfig''', ], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : List[str] = ['''MobileNetV2FeatureExtractor'''] A_ : Tuple = ['''MobileNetV2ImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : Tuple = [ '''MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MobileNetV2ForImageClassification''', '''MobileNetV2ForSemanticSegmentation''', '''MobileNetV2Model''', '''MobileNetV2PreTrainedModel''', '''load_tf_weights_in_mobilenet_v2''', ] if TYPE_CHECKING: from .configuration_mobilenet_va import ( MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileNetVaConfig, MobileNetVaOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor from .image_processing_mobilenet_va import MobileNetVaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilenet_va import ( MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel, MobileNetVaPreTrainedModel, load_tf_weights_in_mobilenet_va, ) else: import sys A_ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
703
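The lazy-import machinery behind _LazyModule is not shown here; the following is a toy, self-contained approximation of the pattern (attribute access triggers the real import exactly once), not the actual transformers implementation.

import importlib
import types


class LazySubmodules(types.ModuleType):
    """Toy stand-in for _LazyModule: resolve attributes on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map attribute name -> submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(attr)
        module = importlib.import_module(module_name)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so the import happens only once
        return value


lazy = LazySubmodules("demo", {"json": ["dumps", "loads"]})
print(lazy.dumps({"a": 1}))  # json is imported only at this point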
def UpperCAmelCase__ ( UpperCAmelCase__ :int , UpperCAmelCase__ :int ): '''simple docstring''' if a < 0 or b < 0: raise ValueError("the value of both inputs must be positive" ) a = str(bin(UpperCAmelCase__ ) )[2:] # remove the leading "0b" a = str(bin(UpperCAmelCase__ ) )[2:] # remove the leading "0b" a = max(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) ) return "0b" + "".join( str(int(char_a == "1" and char_b == "1" ) ) for char_a, char_b in zip(a_binary.zfill(UpperCAmelCase__ ) , b_binary.zfill(UpperCAmelCase__ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
32
0
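As a sanity check, Python's built-in & operator gives the same bits; only the zero padding to the wider operand has to be reproduced by hand:

def builtin_binary_and(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    width = max(a.bit_length(), b.bit_length())
    # Pad to the wider operand, matching the zfill behaviour above.
    return "0b" + format(a & b, f"0{width}b")

print(builtin_binary_and(25, 32))  # 0b000000
print(builtin_binary_and(37, 50))  # 0b100000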
import argparse import torch from safetensors.torch import load_file from diffusers import StableDiffusionPipeline def UpperCAmelCase__ ( UpperCAmelCase__ :Any , UpperCAmelCase__ :Union[str, Any] , UpperCAmelCase__ :Optional[Any] , UpperCAmelCase__ :Optional[int] , UpperCAmelCase__ :Optional[int] ): '''simple docstring''' a = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__ , torch_dtype=torch.floataa ) # load LoRA weight from .safetensors a = load_file(UpperCAmelCase__ ) a = [] # directly update weight in diffusers model for key in state_dict: # it is suggested to print out the key, it usually will be something like below # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight" # as we have set the alpha beforehand, so just skip if ".alpha" in key or key in visited: continue if "text" in key: a = key.split("." )[0].split(LORA_PREFIX_TEXT_ENCODER + "_" )[-1].split("_" ) a = pipeline.text_encoder else: a = key.split("." )[0].split(LORA_PREFIX_UNET + "_" )[-1].split("_" ) a = pipeline.unet # find the target layer a = layer_infos.pop(0 ) while len(UpperCAmelCase__ ) > -1: try: a = curr_layer.__getattr__(UpperCAmelCase__ ) if len(UpperCAmelCase__ ) > 0: a = layer_infos.pop(0 ) elif len(UpperCAmelCase__ ) == 0: break except Exception: if len(UpperCAmelCase__ ) > 0: temp_name += "_" + layer_infos.pop(0 ) else: a = layer_infos.pop(0 ) a = [] if "lora_down" in key: pair_keys.append(key.replace("lora_down" , "lora_up" ) ) pair_keys.append(UpperCAmelCase__ ) else: pair_keys.append(UpperCAmelCase__ ) pair_keys.append(key.replace("lora_up" , "lora_down" ) ) # update weight if len(state_dict[pair_keys[0]].shape ) == 4: a = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) a = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(UpperCAmelCase__ , UpperCAmelCase__ ).unsqueeze(2 ).unsqueeze(3 ) else: a = state_dict[pair_keys[0]].to(torch.floataa ) a = state_dict[pair_keys[1]].to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(UpperCAmelCase__ , UpperCAmelCase__ ) # update visited list for item in pair_keys: visited.append(UpperCAmelCase__ ) return pipeline if __name__ == "__main__": A_ : int = argparse.ArgumentParser() parser.add_argument( '''--base_model_path''', default=None, type=str, required=True, help='''Path to the base model in diffusers format.''' ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.''' ) parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') parser.add_argument( '''--lora_prefix_unet''', default='''lora_unet''', type=str, help='''The prefix of UNet weight in safetensors''' ) parser.add_argument( '''--lora_prefix_text_encoder''', default='''lora_te''', type=str, help='''The prefix of text encoder weight in safetensors''', ) parser.add_argument('''--alpha''', default=0.75, type=float, help='''The merging ratio in W = W0 + alpha * deltaW''') parser.add_argument( '''--to_safetensors''', action='''store_true''', help='''Whether to store pipeline in safetensors format or not.''' ) parser.add_argument('''--device''', type=str, help='''Device to use (e.g. 
cpu, cuda:0, cuda:1, etc.)''') A_ : Tuple = parser.parse_args() A_ : int = args.base_model_path A_ : int = args.checkpoint_path A_ : Union[str, Any] = args.dump_path A_ : str = args.lora_prefix_unet A_ : Tuple = args.lora_prefix_text_encoder A_ : Dict = args.alpha pipe = UpperCAmelCase__(args.base_model_path, args.checkpoint_path, args.lora_prefix_unet, args.lora_prefix_text_encoder, args.alpha) pipe = pipe.to(args.device) pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
704
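The core update the script above applies is W += alpha * (up @ down). A minimal torch sketch on a toy linear layer, with random stand-ins for the safetensors lora_up/lora_down pair:

import torch

torch.manual_seed(0)
layer = torch.nn.Linear(16, 32, bias=False)
rank, alpha = 4, 0.75

# Toy low-rank factors standing in for the "lora_up"/"lora_down" weights.
lora_up = torch.randn(32, rank)
lora_down = torch.randn(rank, 16)

before = layer.weight.detach().clone()
with torch.no_grad():
    layer.weight += alpha * lora_up @ lora_down  # same in-place merge as the script

delta = layer.weight.detach() - before
print(torch.allclose(delta, alpha * lora_up @ lora_down, atol=1e-6))  # True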
from __future__ import annotations from collections.abc import Iterable, Iterator from dataclasses import dataclass A_ : List[str] = (3, 9, -11, 0, 7, 5, 1, -1) A_ : Optional[int] = (4, 6, 2, 0, 8, 10, 3, -2) @dataclass class _lowercase : _UpperCAmelCase = 42 _UpperCAmelCase = 42 class _lowercase : def __init__( self : List[Any] , __lowerCAmelCase : Iterable[int] ) -> None: """simple docstring""" a = None for i in sorted(__lowerCAmelCase , reverse=__lowerCAmelCase ): a = Node(__lowerCAmelCase , self.head ) def __iter__( self : Union[str, Any] ) -> Iterator[int]: """simple docstring""" a = self.head while node: yield node.data a = node.next_node def __len__( self : Tuple ) -> int: """simple docstring""" return sum(1 for _ in self ) def __str__( self : Union[str, Any] ) -> str: """simple docstring""" return " -> ".join([str(__lowerCAmelCase ) for node in self] ) def UpperCAmelCase__ ( UpperCAmelCase__ :SortedLinkedList , UpperCAmelCase__ :SortedLinkedList ): '''simple docstring''' return SortedLinkedList(list(UpperCAmelCase__ ) + list(UpperCAmelCase__ ) ) if __name__ == "__main__": import doctest doctest.testmod() A_ : Optional[Any] = SortedLinkedList print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
32
0
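Note that the merge above rebuilds the list from the concatenation, which re-sorts in O((m+n) log(m+n)). When both inputs are already sorted, heapq.merge streams the result in linear time; a plain-list sketch:

import heapq

odd = sorted((3, 9, -11, 0, 7, 5, 1, -1))
even = sorted((4, 6, 2, 0, 8, 10, 3, -2))

# heapq.merge consumes the two sorted runs lazily in O(m + n).
merged = list(heapq.merge(odd, even))
print(" -> ".join(map(str, merged)))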
from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Value from .base import TaskTemplate @dataclass(frozen=UpperCAmelCase__ ) class _lowercase ( UpperCAmelCase__ ): _UpperCAmelCase = field(default='''language-modeling''', metadata={'''include_in_asdict_even_if_is_default''': True} ) _UpperCAmelCase = Features({'''text''': Value('''string''' )} ) _UpperCAmelCase = Features({} ) _UpperCAmelCase = '''text''' @property def A ( self : int ) -> Dict[str, str]: """simple docstring""" return {self.text_column: "text"}
705
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
32
0
import warnings from ...utils import logging from .image_processing_layoutlmva import LayoutLMvaImageProcessor A_ : Any = logging.get_logger(__name__) class _lowercase ( UpperCAmelCase__ ): def __init__( self : Dict , *__lowerCAmelCase : Union[str, Any] , **__lowerCAmelCase : Union[str, Any] ) -> None: """simple docstring""" warnings.warn( "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use LayoutLMv2ImageProcessor instead." , __lowerCAmelCase , ) super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
706
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices A_ : int = logging.get_logger(__name__) A_ : str = { '''microsoft/focalnet-tiny''': '''https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json''', } class _lowercase ( UpperCAmelCase__, UpperCAmelCase__ ): _UpperCAmelCase = '''focalnet''' def __init__( self : int , __lowerCAmelCase : Optional[Any]=224 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : Any=3 , __lowerCAmelCase : Tuple=96 , __lowerCAmelCase : Dict=False , __lowerCAmelCase : Optional[int]=[192, 384, 768, 768] , __lowerCAmelCase : Union[str, Any]=[2, 2, 6, 2] , __lowerCAmelCase : Optional[int]=[2, 2, 2, 2] , __lowerCAmelCase : Union[str, Any]=[3, 3, 3, 3] , __lowerCAmelCase : str="gelu" , __lowerCAmelCase : Any=4.0 , __lowerCAmelCase : Optional[int]=0.0 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : str=False , __lowerCAmelCase : Optional[int]=1E-4 , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : str=False , __lowerCAmelCase : Any=0.0_2 , __lowerCAmelCase : str=1E-5 , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Dict=None , __lowerCAmelCase : str=None , **__lowerCAmelCase : Any , ) -> List[str]: """simple docstring""" super().__init__(**__lowerCAmelCase ) a = image_size a = patch_size a = num_channels a = embed_dim a = use_conv_embed a = hidden_sizes a = depths a = focal_levels a = focal_windows a = hidden_act a = mlp_ratio a = hidden_dropout_prob a = drop_path_rate a = use_layerscale a = layerscale_value a = use_post_layernorm a = use_post_layernorm_in_modulation a = normalize_modulator a = initializer_range a = layer_norm_eps a = encoder_stride a = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )] a , a = get_aligned_output_features_output_indices( out_features=__lowerCAmelCase , out_indices=__lowerCAmelCase , stage_names=self.stage_names )
32
0
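get_aligned_output_features_output_indices is imported above rather than defined; a hedged re-implementation of the alignment it appears to perform (names resolved against stage_names, defaulting to the last stage) is:

# Assumption: this mirrors the helper's observable behaviour, not its exact source.
def align_output_features(out_features, out_indices, stage_names):
    if out_features is None and out_indices is None:
        out_features = [stage_names[-1]]  # default: expose only the last stage
    if out_features is None:
        out_features = [stage_names[i] for i in out_indices]
    if out_indices is None:
        out_indices = [stage_names.index(name) for name in out_features]
    return out_features, out_indices

stage_names = ["stem"] + [f"stage{i}" for i in range(1, 5)]
print(align_output_features(["stage2", "stage4"], None, stage_names))
# (['stage2', 'stage4'], [2, 4])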
import json import logging import os import sys from time import time from unittest.mock import patch from transformers.testing_utils import TestCasePlus, require_torch_tpu logging.basicConfig(level=logging.DEBUG) A_ : Optional[int] = logging.getLogger() def UpperCAmelCase__ ( UpperCAmelCase__ :Union[str, Any] ) -> str: '''simple docstring''' a = {} a = os.path.join(UpperCAmelCase__ , "all_results.json" ) if os.path.exists(UpperCAmelCase__ ): with open(UpperCAmelCase__ , "r" ) as f: a = json.load(UpperCAmelCase__ ) else: raise ValueError(F"""can't find {path}""" ) return results A_ : Any = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) @require_torch_tpu class _lowercase ( UpperCAmelCase__ ): def A ( self : Any ) -> Tuple: """simple docstring""" import xla_spawn a = self.get_auto_remove_tmp_dir() a = f""" ./examples/pytorch/text-classification/run_glue.py --num_cores=8 ./examples/pytorch/text-classification/run_glue.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --overwrite_output_dir --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --do_train --do_eval --debug tpu_metrics_debug --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --max_steps=10 --warmup_steps=2 --seed=42 --max_seq_length=128 """.split() with patch.object(__lowerCAmelCase , "argv" , __lowerCAmelCase ): a = time() xla_spawn.main() a = time() a = get_results(__lowerCAmelCase ) self.assertGreaterEqual(result["eval_accuracy"] , 0.7_5 ) # Assert that the script takes less than 500 seconds to make sure it doesn't hang. self.assertLess(end - start , 500 ) def A ( self : Tuple ) -> Tuple: """simple docstring""" import xla_spawn a = "\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n ".split() with patch.object(__lowerCAmelCase , "argv" , __lowerCAmelCase ): xla_spawn.main()
707
def UpperCAmelCase__ ( UpperCAmelCase__ :Any ): '''simple docstring''' if not head: return True # split the list to two parts a , a = head.next, head while fast and fast.next: a = fast.next.next a = slow.next a = slow.next a = None # Don't forget here! But forget still works! # reverse the second part a = None while second: a = second.next a = node a = second a = nxt # compare two parts # second part has the same or one less node while node: if node.val != head.val: return False a = node.next a = head.next return True def UpperCAmelCase__ ( UpperCAmelCase__ :str ): '''simple docstring''' if not head or not head.next: return True # 1. Get the midpoint (slow) a = a = a = head while fast and fast.next: a , a = fast.next.next, slow.next # 2. Push the second half into the stack a = [slow.val] while slow.next: a = slow.next stack.append(slow.val ) # 3. Comparison while stack: if stack.pop() != cur.val: return False a = cur.next return True def UpperCAmelCase__ ( UpperCAmelCase__ :Any ): '''simple docstring''' if not head or not head.next: return True a = {} a = 0 while head: if head.val in d: d[head.val].append(UpperCAmelCase__ ) else: a = [pos] a = head.next pos += 1 a = pos - 1 a = 0 for v in d.values(): if len(UpperCAmelCase__ ) % 2 != 0: middle += 1 else: a = 0 for i in range(0 , len(UpperCAmelCase__ ) ): if v[i] + v[len(UpperCAmelCase__ ) - 1 - step] != checksum: return False step += 1 if middle > 1: return False return True
32
0
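The helpers above assume a singly linked node with val/next attributes but define no node class. A self-contained version of the stack-based variant, with a tiny node type and builder so it can actually be run:

class ListNode:
    def __init__(self, val, next=None):
        self.val = val
        self.next = next


def build(values):
    head = None
    for v in reversed(values):
        head = ListNode(v, head)  # prepend, so the list keeps the original order
    return head


def is_palindrome_stack(head):
    # Same idea as the second helper above: walk to the middle, stack the rest.
    if not head or not head.next:
        return True
    slow = fast = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    cur = head
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True


print(is_palindrome_stack(build([1, 2, 2, 1])))  # True
print(is_palindrome_stack(build([1, 2, 3])))     # False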
'''simple docstring''' import os from typing import Dict, List, Tuple, TypeVar, Union A_ : List[Any] = TypeVar('''T''') A_ : int = Union[List[T], Tuple[T, ...]] A_ : Tuple = Union[T, List[T], Dict[str, T]] A_ : int = Union[str, bytes, os.PathLike]
708
import unittest from transformers import MobileBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertModel, ) class _lowercase : def __init__( self : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any=13 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : int=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : str=99 , __lowerCAmelCase : List[str]=64 , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Dict=5 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Optional[Any]=37 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : List[str]=512 , __lowerCAmelCase : List[Any]=16 , __lowerCAmelCase : Union[str, Any]=2 , __lowerCAmelCase : Optional[Any]=0.0_2 , __lowerCAmelCase : Dict=3 , __lowerCAmelCase : Optional[int]=4 , __lowerCAmelCase : Union[str, Any]=None , ) -> List[str]: """simple docstring""" a = parent a = batch_size a = seq_length a = is_training a = use_input_mask a = use_token_type_ids a = use_labels a = vocab_size a = hidden_size a = embedding_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = type_vocab_size a = type_sequence_label_size a = initializer_range a = num_labels a = num_choices a = scope def A ( self : Optional[int] ) -> Optional[int]: """simple docstring""" a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a = None if self.use_input_mask: a = random_attention_mask([self.batch_size, self.seq_length] ) a = None if self.use_token_type_ids: a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a = None a = None a = None if self.use_labels: a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a = ids_tensor([self.batch_size] , self.num_choices ) a = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A ( self : int ) -> List[str]: """simple docstring""" return MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , ) def A ( self : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : int , __lowerCAmelCase : Dict , 
__lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict ) -> Union[str, Any]: """simple docstring""" a = MobileBertModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) a = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) a = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def A ( self : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Any ) -> str: """simple docstring""" a = MobileBertForMaskedLM(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A ( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] ) -> List[str]: """simple docstring""" a = MobileBertForNextSentencePrediction(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def A ( self : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] ) -> List[Any]: """simple docstring""" a = MobileBertForPreTraining(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , next_sentence_label=__lowerCAmelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def A ( self : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ) -> Any: """simple docstring""" a = MobileBertForQuestionAnswering(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A ( self : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] ) -> Optional[int]: """simple docstring""" a = 
self.num_labels a = MobileBertForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ) -> Optional[Any]: """simple docstring""" a = self.num_labels a = MobileBertForTokenClassification(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A ( self : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] ) -> List[str]: """simple docstring""" a = self.num_choices a = MobileBertForMultipleChoice(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A ( self : List[Any] ) -> Dict: """simple docstring""" a = self.prepare_config_and_inputs() ( ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ) = config_and_inputs a = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ): _UpperCAmelCase = ( ( MobileBertModel, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, ) if is_torch_available() else () ) _UpperCAmelCase = ( { '''feature-extraction''': MobileBertModel, '''fill-mask''': MobileBertForMaskedLM, '''question-answering''': MobileBertForQuestionAnswering, '''text-classification''': MobileBertForSequenceClassification, '''token-classification''': MobileBertForTokenClassification, '''zero-shot''': MobileBertForSequenceClassification, } if is_torch_available() else {} ) _UpperCAmelCase = True def A ( self : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any=False ) -> Any: """simple docstring""" a = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) if return_labels: if model_class in get_values(__lowerCAmelCase ): a = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCAmelCase ) a = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase ) return inputs_dict def A ( self : Optional[int] ) -> List[Any]: """simple docstring""" a = MobileBertModelTester(self ) a = ConfigTester(self , 
config_class=__lowerCAmelCase , hidden_size=37 ) def A ( self : int ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() def A ( self : str ) -> Dict: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*__lowerCAmelCase ) def A ( self : str ) -> str: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*__lowerCAmelCase ) def A ( self : List[str] ) -> Dict: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__lowerCAmelCase ) def A ( self : int ) -> Dict: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__lowerCAmelCase ) def A ( self : List[Any] ) -> int: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*__lowerCAmelCase ) def A ( self : List[Any] ) -> Dict: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*__lowerCAmelCase ) def A ( self : List[Any] ) -> Optional[int]: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__lowerCAmelCase ) def A ( self : int ) -> Tuple: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*__lowerCAmelCase ) def UpperCAmelCase__ ( UpperCAmelCase__ :Dict ): '''simple docstring''' return torch.tensor( UpperCAmelCase__ , dtype=torch.long , device=UpperCAmelCase__ , ) A_ : Dict = 1E-3 @require_torch @require_sentencepiece @require_tokenizers class _lowercase ( unittest.TestCase ): @slow def A ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" a = MobileBertModel.from_pretrained("google/mobilebert-uncased" ).to(__lowerCAmelCase ) a = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] ) with torch.no_grad(): a = model(__lowerCAmelCase )[0] a = torch.Size((1, 9, 512) ) self.assertEqual(output.shape , __lowerCAmelCase ) a = torch.tensor( [ [ [-2.4_73_65_26E07, 8.2_69_16_56E04, 1.6_52_18_38E05], [-5.7_54_17_04E-01, 3.9_05_60_22E00, 4.4_01_15_07E00], [2.6_04_73_59E00, 1.5_67_76_52E00, -1.7_32_41_88E-01], ] ] , device=__lowerCAmelCase , ) # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a # ~1 difference, it's therefore not a good idea to measure using addition. # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE a = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE ) a = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE ) self.assertTrue(lower_bound and upper_bound )
32
0
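The ratio-based tolerance trick described in the comment above is easy to see on toy numbers: an absolute comparison at 1e7 scale would need an enormous atol, while the ratio stays near 1. A small numpy illustration (the 0.05% perturbation is made up):

import numpy as np

TOLERANCE = 1e-3
expected = np.array([2.4736526e07, 8.2691656e04, 1.6521838e05])
observed = expected * (1 + 5e-4)  # a 0.05% relative deviation

ratio = expected / observed
# Absolute differences here are ~1e4, but the ratio is ~0.9995.
print(np.all((ratio >= 1 - TOLERANCE) & (ratio <= 1 + TOLERANCE)))  # True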
import re import tempfile from pathlib import Path import pytest import yaml from datasets.utils.readme import ReadMe # @pytest.fixture # def example_yaml_structure(): A_ : List[str] = yaml.safe_load( '''\ name: "" allow_empty: false allow_empty_text: true subsections: - name: "Dataset Card for X" # First-level markdown heading allow_empty: false allow_empty_text: true subsections: - name: "Table of Contents" allow_empty: false allow_empty_text: false subsections: null - name: "Dataset Description" allow_empty: false allow_empty_text: false subsections: - name: "Dataset Summary" allow_empty: false allow_empty_text: false subsections: null - name: "Supported Tasks and Leaderboards" allow_empty: true allow_empty_text: true subsections: null - name: Languages allow_empty: false allow_empty_text: true subsections: null ''' ) A_ : List[Any] = { '''name''': '''root''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ { '''name''': '''Dataset Card for My Dataset''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ {'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []}, { '''name''': '''Dataset Description''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Dataset Summary''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [], }, { '''name''': '''Supported Tasks and Leaderboards''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], }, {'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []}, ], }, ], } ], } A_ : Dict = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' A_ : Any = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. #### Extra Ignored Subsection ### Supported Tasks and Leaderboards ### Languages Language Text ''' A_ : str = { '''name''': '''root''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ { '''name''': '''Dataset Card for My Dataset''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ {'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []}, { '''name''': '''Dataset Description''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Dataset Summary''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Extra Ignored Subsection''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], } ], }, { '''name''': '''Supported Tasks and Leaderboards''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], }, {'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []}, ], }, ], } ], } A_ : Tuple = '''\ --- --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. 
### Supported Tasks and Leaderboards ### Languages Language Text ''' A_ : Optional[int] = ( '''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.''' ) A_ : Optional[int] = '''\ # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' A_ : Tuple = ( '''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.''' ) A_ : Optional[int] = '''\ --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' A_ : str = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.''' A_ : Optional[Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary ### Supported Tasks and Leaderboards ### Languages Language Text ''' A_ : Dict = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).''' A_ : str = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ''' A_ : List[str] = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.''' A_ : int = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Languages Language Text ''' A_ : List[str] = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.''' A_ : Optional[Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages ''' A_ : int = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.''' A_ : Optional[Any] = '''\ --- language: - zh - en --- ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' A_ : Tuple = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.''' A_ : int = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text # Dataset Card My Dataset ''' A_ : Dict = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. 
Skipping further validation for this README.''' A_ : Dict = '''\ --- language: - zh - en --- # Dataset Card My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' A_ : str = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.''' A_ : str = '''''' A_ : List[str] = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.''' A_ : Optional[Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' A_ : List[Any] = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.''' @pytest.mark.parametrize( "readme_md, expected_dict" , [ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] , ) def UpperCAmelCase__ ( UpperCAmelCase__ :Union[str, Any] , UpperCAmelCase__ :Optional[Any] ): '''simple docstring''' assert ReadMe.from_string(UpperCAmelCase__ , UpperCAmelCase__ ).to_dict() == expected_dict @pytest.mark.parametrize( "readme_md, expected_error" , [ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] , ) def UpperCAmelCase__ ( UpperCAmelCase__ :Dict , UpperCAmelCase__ :int ): '''simple docstring''' with pytest.raises(UpperCAmelCase__ , match=re.escape(expected_error.format(path="root" ) ) ): a = ReadMe.from_string(UpperCAmelCase__ , UpperCAmelCase__ ) readme.validate() @pytest.mark.parametrize( "readme_md, expected_error" , [ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] , ) def UpperCAmelCase__ ( UpperCAmelCase__ :str , UpperCAmelCase__ :Optional[Any] ): '''simple docstring''' with pytest.raises(UpperCAmelCase__ , match=re.escape(expected_error.format(path="root" ) ) ): ReadMe.from_string(UpperCAmelCase__ , UpperCAmelCase__ ) @pytest.mark.parametrize( "readme_md," , [ (README_MULTIPLE_SAME_HEADING_1), ] , ) def UpperCAmelCase__ ( UpperCAmelCase__ :str ): '''simple docstring''' ReadMe.from_string(UpperCAmelCase__ , UpperCAmelCase__ , suppress_parsing_errors=UpperCAmelCase__ ) @pytest.mark.parametrize( "readme_md, expected_dict" , [ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] , ) def UpperCAmelCase__ ( UpperCAmelCase__ :List[str] , UpperCAmelCase__ :int ): 
'''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: a = Path(UpperCAmelCase__ ) / "README.md" with open(UpperCAmelCase__ , "w+" ) as readme_file: readme_file.write(UpperCAmelCase__ ) a = ReadMe.from_readme(UpperCAmelCase__ , UpperCAmelCase__ ).to_dict() assert out["name"] == path assert out["text"] == "" assert out["is_empty_text"] assert out["subsections"] == expected_dict["subsections"] @pytest.mark.parametrize( "readme_md, expected_error" , [ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] , ) def UpperCAmelCase__ ( UpperCAmelCase__ :Optional[int] , UpperCAmelCase__ :str ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: a = Path(UpperCAmelCase__ ) / "README.md" with open(UpperCAmelCase__ , "w+" ) as readme_file: readme_file.write(UpperCAmelCase__ ) a = expected_error.format(path=UpperCAmelCase__ ) with pytest.raises(UpperCAmelCase__ , match=re.escape(UpperCAmelCase__ ) ): a = ReadMe.from_readme(UpperCAmelCase__ , UpperCAmelCase__ ) readme.validate() @pytest.mark.parametrize( "readme_md, expected_error" , [ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] , ) def UpperCAmelCase__ ( UpperCAmelCase__ :Optional[Any] , UpperCAmelCase__ :int ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: a = Path(UpperCAmelCase__ ) / "README.md" with open(UpperCAmelCase__ , "w+" ) as readme_file: readme_file.write(UpperCAmelCase__ ) a = expected_error.format(path=UpperCAmelCase__ ) with pytest.raises(UpperCAmelCase__ , match=re.escape(UpperCAmelCase__ ) ): ReadMe.from_readme(UpperCAmelCase__ , UpperCAmelCase__ ) @pytest.mark.parametrize( "readme_md," , [ (README_MULTIPLE_SAME_HEADING_1), ] , ) def UpperCAmelCase__ ( UpperCAmelCase__ :Optional[Any] ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: a = Path(UpperCAmelCase__ ) / "README.md" with open(UpperCAmelCase__ , "w+" ) as readme_file: readme_file.write(UpperCAmelCase__ ) ReadMe.from_readme(UpperCAmelCase__ , UpperCAmelCase__ , suppress_parsing_errors=UpperCAmelCase__ )
709
import re from pathlib import Path from unittest import TestCase import pytest @pytest.mark.integration class _lowercase ( UpperCAmelCase__ ): def A ( self : Optional[int] , __lowerCAmelCase : str ) -> Union[str, Any]: """simple docstring""" with open(__lowerCAmelCase , encoding="utf-8" ) as input_file: a = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" ) a = input_file.read() a = regexp.search(__lowerCAmelCase ) return match def A ( self : List[Any] , __lowerCAmelCase : str ) -> Dict: """simple docstring""" with open(__lowerCAmelCase , encoding="utf-8" ) as input_file: a = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL ) a = input_file.read() # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search` a = regexp.finditer(__lowerCAmelCase ) a = [match for match in matches if match is not None and match.group(1 ) is not None] return matches[0] if matches else None def A ( self : List[str] ) -> List[Any]: """simple docstring""" a = Path("./datasets" ) a = list(dataset_paths.absolute().glob("**/*.py" ) ) for dataset in dataset_files: if self._no_encoding_on_file_open(str(__lowerCAmelCase ) ): raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""" ) def A ( self : Tuple ) -> Union[str, Any]: """simple docstring""" a = Path("./datasets" ) a = list(dataset_paths.absolute().glob("**/*.py" ) ) for dataset in dataset_files: if self._no_print_statements(str(__lowerCAmelCase ) ): raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
32
0
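The first regex above is dense; as I read it, the negative lookahead suppresses matches on lines that already mention an encoding or a binary mode, and the lookbehind anchors open( to a preceding space. A quick standalone check:

import re

pattern = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")

flagged = ' data = open("data.txt").read()'
allowed = ' data = open("data.txt", encoding="utf-8").read()'

print(bool(pattern.search(flagged)))  # True: no encoding or binary-mode token on the line
print(bool(pattern.search(allowed)))  # False: the "encoding" keyword is present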
import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all feature extractors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...feature_extraction_utils import FeatureExtractionMixin from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) A_ : str = logging.get_logger(__name__) A_ : Optional[int] = OrderedDict( [ ('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''), ('''beit''', '''BeitFeatureExtractor'''), ('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''), ('''clap''', '''ClapFeatureExtractor'''), ('''clip''', '''CLIPFeatureExtractor'''), ('''clipseg''', '''ViTFeatureExtractor'''), ('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''), ('''convnext''', '''ConvNextFeatureExtractor'''), ('''cvt''', '''ConvNextFeatureExtractor'''), ('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''), ('''data2vec-vision''', '''BeitFeatureExtractor'''), ('''deformable_detr''', '''DeformableDetrFeatureExtractor'''), ('''deit''', '''DeiTFeatureExtractor'''), ('''detr''', '''DetrFeatureExtractor'''), ('''dinat''', '''ViTFeatureExtractor'''), ('''donut-swin''', '''DonutFeatureExtractor'''), ('''dpt''', '''DPTFeatureExtractor'''), ('''encodec''', '''EncodecFeatureExtractor'''), ('''flava''', '''FlavaFeatureExtractor'''), ('''glpn''', '''GLPNFeatureExtractor'''), ('''groupvit''', '''CLIPFeatureExtractor'''), ('''hubert''', '''Wav2Vec2FeatureExtractor'''), ('''imagegpt''', '''ImageGPTFeatureExtractor'''), ('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''), ('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''), ('''levit''', '''LevitFeatureExtractor'''), ('''maskformer''', '''MaskFormerFeatureExtractor'''), ('''mctct''', '''MCTCTFeatureExtractor'''), ('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''), ('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''), ('''mobilevit''', '''MobileViTFeatureExtractor'''), ('''nat''', '''ViTFeatureExtractor'''), ('''owlvit''', '''OwlViTFeatureExtractor'''), ('''perceiver''', '''PerceiverFeatureExtractor'''), ('''poolformer''', '''PoolFormerFeatureExtractor'''), ('''regnet''', '''ConvNextFeatureExtractor'''), ('''resnet''', '''ConvNextFeatureExtractor'''), ('''segformer''', '''SegformerFeatureExtractor'''), ('''sew''', '''Wav2Vec2FeatureExtractor'''), ('''sew-d''', '''Wav2Vec2FeatureExtractor'''), ('''speech_to_text''', '''Speech2TextFeatureExtractor'''), ('''speecht5''', '''SpeechT5FeatureExtractor'''), ('''swiftformer''', '''ViTFeatureExtractor'''), ('''swin''', '''ViTFeatureExtractor'''), ('''swinv2''', '''ViTFeatureExtractor'''), ('''table-transformer''', '''DetrFeatureExtractor'''), ('''timesformer''', '''VideoMAEFeatureExtractor'''), ('''tvlt''', '''TvltFeatureExtractor'''), ('''unispeech''', '''Wav2Vec2FeatureExtractor'''), ('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''), ('''van''', '''ConvNextFeatureExtractor'''), ('''videomae''', '''VideoMAEFeatureExtractor'''), ('''vilt''', '''ViltFeatureExtractor'''), ('''vit''', '''ViTFeatureExtractor'''), ('''vit_mae''', '''ViTFeatureExtractor'''), ('''vit_msn''', '''ViTFeatureExtractor'''), ('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''), ('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''), 
('''wavlm''', '''Wav2Vec2FeatureExtractor'''), ('''whisper''', '''WhisperFeatureExtractor'''), ('''xclip''', '''CLIPFeatureExtractor'''), ('''yolos''', '''YolosFeatureExtractor'''), ] ) A_ : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES) def UpperCAmelCase__ ( UpperCAmelCase__ :str ): '''simple docstring''' for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items(): if class_name in extractors: a = model_type_to_module_name(UpperCAmelCase__ ) a = importlib.import_module(F""".{module_name}""" , "transformers.models" ) try: return getattr(UpperCAmelCase__ , UpperCAmelCase__ ) except AttributeError: continue for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items(): if getattr(UpperCAmelCase__ , "__name__" , UpperCAmelCase__ ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. a = importlib.import_module("transformers" ) if hasattr(UpperCAmelCase__ , UpperCAmelCase__ ): return getattr(UpperCAmelCase__ , UpperCAmelCase__ ) return None def UpperCAmelCase__ ( UpperCAmelCase__ :Union[str, os.PathLike] , UpperCAmelCase__ :Optional[Union[str, os.PathLike]] = None , UpperCAmelCase__ :bool = False , UpperCAmelCase__ :bool = False , UpperCAmelCase__ :Optional[Dict[str, str]] = None , UpperCAmelCase__ :Optional[Union[bool, str]] = None , UpperCAmelCase__ :Optional[str] = None , UpperCAmelCase__ :bool = False , **UpperCAmelCase__ :List[str] , ): '''simple docstring''' a = get_file_from_repo( UpperCAmelCase__ , UpperCAmelCase__ , cache_dir=UpperCAmelCase__ , force_download=UpperCAmelCase__ , resume_download=UpperCAmelCase__ , proxies=UpperCAmelCase__ , use_auth_token=UpperCAmelCase__ , revision=UpperCAmelCase__ , local_files_only=UpperCAmelCase__ , ) if resolved_config_file is None: logger.info( "Could not locate the feature extractor configuration file, will try to use the model config instead." ) return {} with open(UpperCAmelCase__ , encoding="utf-8" ) as reader: return json.load(UpperCAmelCase__ ) class _lowercase : def __init__( self : Optional[Any] ) -> List[str]: """simple docstring""" raise EnvironmentError( "AutoFeatureExtractor is designed to be instantiated " "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method." ) @classmethod @replace_list_option_in_docstrings(__lowerCAmelCase ) def A ( cls : List[str] , __lowerCAmelCase : Any , **__lowerCAmelCase : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" a = kwargs.pop("config" , __lowerCAmelCase ) a = kwargs.pop("trust_remote_code" , __lowerCAmelCase ) a = True a , a = FeatureExtractionMixin.get_feature_extractor_dict(__lowerCAmelCase , **__lowerCAmelCase ) a = config_dict.get("feature_extractor_type" , __lowerCAmelCase ) a = None if "AutoFeatureExtractor" in config_dict.get("auto_map" , {} ): a = config_dict["auto_map"]["AutoFeatureExtractor"] # If we don't find the feature extractor class in the feature extractor config, let's try the model config. 
if feature_extractor_class is None and feature_extractor_auto_map is None: if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): a = AutoConfig.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) # It could be in `config.feature_extractor_type`` a = getattr(__lowerCAmelCase , "feature_extractor_type" , __lowerCAmelCase ) if hasattr(__lowerCAmelCase , "auto_map" ) and "AutoFeatureExtractor" in config.auto_map: a = config.auto_map["AutoFeatureExtractor"] if feature_extractor_class is not None: a = feature_extractor_class_from_name(__lowerCAmelCase ) a = feature_extractor_auto_map is not None a = feature_extractor_class is not None or type(__lowerCAmelCase ) in FEATURE_EXTRACTOR_MAPPING a = resolve_trust_remote_code( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if has_remote_code and trust_remote_code: a = get_class_from_dynamic_module( __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) a = kwargs.pop("code_revision" , __lowerCAmelCase ) if os.path.isdir(__lowerCAmelCase ): feature_extractor_class.register_for_auto_class() return feature_extractor_class.from_dict(__lowerCAmelCase , **__lowerCAmelCase ) elif feature_extractor_class is not None: return feature_extractor_class.from_dict(__lowerCAmelCase , **__lowerCAmelCase ) # Last try: we use the FEATURE_EXTRACTOR_MAPPING. elif type(__lowerCAmelCase ) in FEATURE_EXTRACTOR_MAPPING: a = FEATURE_EXTRACTOR_MAPPING[type(__lowerCAmelCase )] return feature_extractor_class.from_dict(__lowerCAmelCase , **__lowerCAmelCase ) raise ValueError( f"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """ f"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """ f"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""" ) @staticmethod def A ( __lowerCAmelCase : str , __lowerCAmelCase : List[str] ) -> Dict: """simple docstring""" FEATURE_EXTRACTOR_MAPPING.register(__lowerCAmelCase , __lowerCAmelCase )
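# NOTE: a minimal usage sketch of the resolution order implemented above (explicit
# `feature_extractor_type`, then `auto_map` for remote code, then the model-type
# mapping). The checkpoint name is illustrative and requires network access.
from transformers import AutoFeatureExtractor

extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
print(type(extractor).__name__)  # Wav2Vec2FeatureExtractor, resolved via the mapping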
710
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
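# NOTE: a simplified, self-contained illustration of the lazy-import pattern used
# above — attribute access triggers the submodule import. This is a sketch of the
# idea only, not the real transformers `_LazyModule` implementation.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name to the submodule that defines it
        self._name_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f".{self._name_to_module[attr]}", self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value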
32
0
from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) A_ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name A_ : Dict = ''' Examples: ```py >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline >>> import torch >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior") >>> pipe_prior.to("cuda") >>> prompt = "red cat, 4k photo" >>> out = pipe_prior(prompt) >>> image_emb = out.image_embeds >>> zero_image_emb = out.negative_image_embeds >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder") >>> pipe.to("cuda") >>> image = pipe( ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... height=768, ... width=768, ... num_inference_steps=50, ... ).images >>> image[0].save("cat.png") ``` ''' def UpperCAmelCase__ ( UpperCAmelCase__ :Optional[Any] , UpperCAmelCase__ :Optional[int] , UpperCAmelCase__ :Optional[int]=8 ): '''simple docstring''' a = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 a = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class _lowercase ( UpperCAmelCase__ ): def __init__( self : Optional[Any] , __lowerCAmelCase : UNetaDConditionModel , __lowerCAmelCase : DDPMScheduler , __lowerCAmelCase : VQModel , ) -> Union[str, Any]: """simple docstring""" super().__init__() self.register_modules( unet=__lowerCAmelCase , scheduler=__lowerCAmelCase , movq=__lowerCAmelCase , ) a = 2 ** (len(self.movq.config.block_out_channels ) - 1) def A ( self : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict ) -> Optional[int]: """simple docstring""" if latents is None: a = randn_tensor(__lowerCAmelCase , generator=__lowerCAmelCase , device=__lowerCAmelCase , dtype=__lowerCAmelCase ) else: if latents.shape != shape: raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" ) a = latents.to(__lowerCAmelCase ) a = latents * scheduler.init_noise_sigma return latents def A ( self : Any , __lowerCAmelCase : Any=0 ) -> List[Any]: """simple docstring""" if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`" ) a = torch.device(f"""cuda:{gpu_id}""" ) a = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(__lowerCAmelCase , __lowerCAmelCase ) def A ( self : Dict , __lowerCAmelCase : Any=0 ) -> Any: """simple docstring""" if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ): from accelerate import cpu_offload_with_hook else: raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." 
) a = torch.device(f"""cuda:{gpu_id}""" ) if self.device.type != "cpu": self.to("cpu" , silence_dtype_warnings=__lowerCAmelCase ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) a = None for cpu_offloaded_model in [self.unet, self.movq]: a , a = cpu_offload_with_hook(__lowerCAmelCase , __lowerCAmelCase , prev_module_hook=__lowerCAmelCase ) # We'll offload the last model manually. a = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def A ( self : List[Any] ) -> str: """simple docstring""" if not hasattr(self.unet , "_hf_hook" ): return self.device for module in self.unet.modules(): if ( hasattr(__lowerCAmelCase , "_hf_hook" ) and hasattr(module._hf_hook , "execution_device" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(__lowerCAmelCase ) def __call__( self : List[str] , __lowerCAmelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , __lowerCAmelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , __lowerCAmelCase : int = 512 , __lowerCAmelCase : int = 512 , __lowerCAmelCase : int = 100 , __lowerCAmelCase : float = 4.0 , __lowerCAmelCase : int = 1 , __lowerCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowerCAmelCase : Optional[torch.FloatTensor] = None , __lowerCAmelCase : Optional[str] = "pil" , __lowerCAmelCase : bool = True , ) -> List[str]: """simple docstring""" a = self._execution_device a = guidance_scale > 1.0 if isinstance(__lowerCAmelCase , __lowerCAmelCase ): a = torch.cat(__lowerCAmelCase , dim=0 ) a = image_embeds.shape[0] * num_images_per_prompt if isinstance(__lowerCAmelCase , __lowerCAmelCase ): a = torch.cat(__lowerCAmelCase , dim=0 ) if do_classifier_free_guidance: a = image_embeds.repeat_interleave(__lowerCAmelCase , dim=0 ) a = negative_image_embeds.repeat_interleave(__lowerCAmelCase , dim=0 ) a = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__lowerCAmelCase ) self.scheduler.set_timesteps(__lowerCAmelCase , device=__lowerCAmelCase ) a = self.scheduler.timesteps a = self.unet.config.in_channels a , a = downscale_height_and_width(__lowerCAmelCase , __lowerCAmelCase , self.movq_scale_factor ) # create initial latent a = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , self.scheduler , ) for i, t in enumerate(self.progress_bar(__lowerCAmelCase ) ): # expand the latents if we are doing classifier free guidance a = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents a = {"image_embeds": image_embeds} a = self.unet( sample=__lowerCAmelCase , timestep=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , added_cond_kwargs=__lowerCAmelCase , return_dict=__lowerCAmelCase , )[0] if do_classifier_free_guidance: a , a = noise_pred.split(latents.shape[1] , dim=1 ) a , a = noise_pred.chunk(2 ) a , a = variance_pred.chunk(2 ) a = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) a = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , "variance_type" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): a , a = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 a = self.scheduler.step( 
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase , )[0] # post-processing a = self.movq.decode(__lowerCAmelCase , force_not_quantize=__lowerCAmelCase )["sample"] if output_type not in ["pt", "np", "pil"]: raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" ) if output_type in ["np", "pil"]: a = image * 0.5 + 0.5 a = image.clamp(0 , 1 ) a = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": a = self.numpy_to_pil(__lowerCAmelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=__lowerCAmelCase )
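# NOTE: the `downscale_height_and_width` helper above maps a requested image size to
# the latent grid the movq decoder works on, rounding up when the size does not
# divide evenly. A standalone spot check of that rounding (copied logic, not an import):
def _check_downscale(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


assert _check_downscale(768, 768) == (96, 96)  # divides evenly
assert _check_downscale(500, 500) == (64, 64)  # 500 / 64 = 7.8 -> rounded up to 8 latents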
711
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class _lowercase ( UpperCAmelCase__ ): _UpperCAmelCase = (UniPCMultistepScheduler,) _UpperCAmelCase = (('''num_inference_steps''', 25),) def A ( self : List[Any] , **__lowerCAmelCase : Optional[int] ) -> int: """simple docstring""" a = { "num_train_timesteps": 1000, "beta_start": 0.0_0_0_1, "beta_end": 0.0_2, "beta_schedule": "linear", "solver_order": 2, "solver_type": "bh2", } config.update(**__lowerCAmelCase ) return config def A ( self : List[Any] , __lowerCAmelCase : Optional[int]=0 , **__lowerCAmelCase : Optional[Any] ) -> int: """simple docstring""" a = dict(self.forward_default_kwargs ) a = kwargs.pop("num_inference_steps" , __lowerCAmelCase ) a = self.dummy_sample a = 0.1 * sample a = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config(**__lowerCAmelCase ) a = scheduler_class(**__lowerCAmelCase ) scheduler.set_timesteps(__lowerCAmelCase ) # copy over dummy past residuals a = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowerCAmelCase ) a = scheduler_class.from_pretrained(__lowerCAmelCase ) new_scheduler.set_timesteps(__lowerCAmelCase ) # copy over dummy past residuals a = dummy_past_residuals[: new_scheduler.config.solver_order] a , a = sample, sample for t in range(__lowerCAmelCase , time_step + scheduler.config.solver_order + 1 ): a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample a = new_scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def A ( self : List[Any] , __lowerCAmelCase : Optional[Any]=0 , **__lowerCAmelCase : List[Any] ) -> List[str]: """simple docstring""" a = dict(self.forward_default_kwargs ) a = kwargs.pop("num_inference_steps" , __lowerCAmelCase ) a = self.dummy_sample a = 0.1 * sample a = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config() a = scheduler_class(**__lowerCAmelCase ) scheduler.set_timesteps(__lowerCAmelCase ) # copy over dummy past residuals (must be after setting timesteps) a = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowerCAmelCase ) a = scheduler_class.from_pretrained(__lowerCAmelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(__lowerCAmelCase ) # copy over dummy past residual (must be after setting timesteps) a = dummy_past_residuals[: new_scheduler.config.solver_order] a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample a = new_scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def A ( self : str , __lowerCAmelCase : Any=None , **__lowerCAmelCase : List[str] ) -> Any: """simple docstring""" if scheduler is None: a = self.scheduler_classes[0] a = self.get_scheduler_config(**__lowerCAmelCase ) a = scheduler_class(**__lowerCAmelCase ) a = self.scheduler_classes[0] a = 
self.get_scheduler_config(**__lowerCAmelCase ) a = scheduler_class(**__lowerCAmelCase ) a = 10 a = self.dummy_model() a = self.dummy_sample_deter scheduler.set_timesteps(__lowerCAmelCase ) for i, t in enumerate(scheduler.timesteps ): a = model(__lowerCAmelCase , __lowerCAmelCase ) a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample return sample def A ( self : Any ) -> int: """simple docstring""" a = dict(self.forward_default_kwargs ) a = kwargs.pop("num_inference_steps" , __lowerCAmelCase ) for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config() a = scheduler_class(**__lowerCAmelCase ) a = self.dummy_sample a = 0.1 * sample if num_inference_steps is not None and hasattr(__lowerCAmelCase , "set_timesteps" ): scheduler.set_timesteps(__lowerCAmelCase ) elif num_inference_steps is not None and not hasattr(__lowerCAmelCase , "set_timesteps" ): a = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) a = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] a = dummy_past_residuals[: scheduler.config.solver_order] a = scheduler.timesteps[5] a = scheduler.timesteps[6] a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def A ( self : List[str] ) -> Dict: """simple docstring""" a = UniPCMultistepScheduler(**self.get_scheduler_config() ) a = self.full_loop(scheduler=__lowerCAmelCase ) a = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3 a = DPMSolverSinglestepScheduler.from_config(scheduler.config ) a = DEISMultistepScheduler.from_config(scheduler.config ) a = DPMSolverMultistepScheduler.from_config(scheduler.config ) a = UniPCMultistepScheduler.from_config(scheduler.config ) a = self.full_loop(scheduler=__lowerCAmelCase ) a = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3 def A ( self : List[Any] ) -> Dict: """simple docstring""" for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=__lowerCAmelCase ) def A ( self : Optional[Any] ) -> Tuple: """simple docstring""" self.check_over_configs(thresholding=__lowerCAmelCase ) for order in [1, 2, 3]: for solver_type in ["bh1", "bh2"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=__lowerCAmelCase , prediction_type=__lowerCAmelCase , sample_max_value=__lowerCAmelCase , solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , ) def A ( self : Optional[Any] ) -> Any: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__lowerCAmelCase ) def A ( self : Optional[Any] ) -> Any: """simple docstring""" for solver_type in ["bh1", "bh2"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , prediction_type=__lowerCAmelCase , ) a = self.full_loop( solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , prediction_type=__lowerCAmelCase , ) assert not torch.isnan(__lowerCAmelCase ).any(), "Samples have nan numbers" def A ( self : Optional[int] ) -> Any: """simple docstring""" self.check_over_configs(lower_order_final=__lowerCAmelCase ) 
self.check_over_configs(lower_order_final=__lowerCAmelCase ) def A ( self : Dict ) -> str: """simple docstring""" for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=__lowerCAmelCase , time_step=0 ) def A ( self : Dict ) -> int: """simple docstring""" a = self.full_loop() a = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3 def A ( self : Optional[int] ) -> int: """simple docstring""" a = self.full_loop(prediction_type="v_prediction" ) a = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_mean.item() - 0.1_0_1_4 ) < 1E-3 def A ( self : Union[str, Any] ) -> str: """simple docstring""" a = self.scheduler_classes[0] a = self.get_scheduler_config(thresholding=__lowerCAmelCase , dynamic_thresholding_ratio=0 ) a = scheduler_class(**__lowerCAmelCase ) a = 10 a = self.dummy_model() a = self.dummy_sample_deter.half() scheduler.set_timesteps(__lowerCAmelCase ) for i, t in enumerate(scheduler.timesteps ): a = model(__lowerCAmelCase , __lowerCAmelCase ) a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample assert sample.dtype == torch.floataa def A ( self : List[str] , **__lowerCAmelCase : int ) -> Dict: """simple docstring""" for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config(**__lowerCAmelCase ) a = scheduler_class(**__lowerCAmelCase ) scheduler.set_timesteps(scheduler.config.num_train_timesteps ) assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
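# NOTE: the round-trip these tests exercise is plain `save_config`/`from_pretrained`;
# a minimal sketch of that pattern with diffusers' UniPCMultistepScheduler:
import tempfile

from diffusers import UniPCMultistepScheduler

scheduler = UniPCMultistepScheduler(num_train_timesteps=1000, solver_order=2, solver_type="bh2")
with tempfile.TemporaryDirectory() as tmpdirname:
    scheduler.save_config(tmpdirname)
    reloaded = UniPCMultistepScheduler.from_pretrained(tmpdirname)

assert reloaded.config.solver_order == 2
assert reloaded.config.solver_type == "bh2"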
32
0
from typing import List

from .keymap import KEYMAP, get_character


def mark(key: str):
    """Mark the function with the key code to handle."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """Mark the function with the key codes to handle."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Finds and returns the selected character if it exists."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Adds the KeyHandler metaclass to the class."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
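# NOTE: a toy sketch of how the pieces above compose — `mark` tags methods with key
# codes and the `KeyHandler` metaclass indexes them. The class and key codes are
# illustrative only.
@register
class Menu:
    @mark("j")
    def move_down(cls):
        return "down"

    @mark_multiple("k", "K")
    def move_up(cls):
        return "up"


assert Menu.key_handler["j"] is Menu.__dict__["move_down"]
assert Menu.key_handler["k"] is Menu.key_handler["K"]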
712
import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _lowercase : def __init__( self : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int]=13 , __lowerCAmelCase : str=32 , __lowerCAmelCase : str=3 , __lowerCAmelCase : int=4 , __lowerCAmelCase : List[str]=[10, 20, 30, 40] , __lowerCAmelCase : Any=[2, 2, 3, 2] , __lowerCAmelCase : Any=True , __lowerCAmelCase : int=True , __lowerCAmelCase : str=37 , __lowerCAmelCase : List[Any]="gelu" , __lowerCAmelCase : int=10 , __lowerCAmelCase : str=0.0_2 , __lowerCAmelCase : int=["stage2", "stage3", "stage4"] , __lowerCAmelCase : List[str]=[2, 3, 4] , __lowerCAmelCase : str=None , ) -> Optional[Any]: """simple docstring""" a = parent a = batch_size a = image_size a = num_channels a = num_stages a = hidden_sizes a = depths a = is_training a = use_labels a = intermediate_size a = hidden_act a = num_labels a = initializer_range a = out_features a = out_indices a = scope def A ( self : Optional[Any] ) -> int: """simple docstring""" a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a = None if self.use_labels: a = ids_tensor([self.batch_size] , self.num_labels ) a = self.get_config() return config, pixel_values, labels def A ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" return ConvNextVaConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def A ( self : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict ) -> Optional[int]: """simple docstring""" a = ConvNextVaModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def A ( self : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] ) -> Dict: """simple docstring""" a = ConvNextVaForImageClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] ) -> int: """simple docstring""" a = 
ConvNextVaBackbone(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None a = None a = ConvNextVaBackbone(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def A ( self : Union[str, Any] ) -> Dict: """simple docstring""" a = self.prepare_config_and_inputs() a , a , a = config_and_inputs a = {"pixel_values": pixel_values} return config, inputs_dict def A ( self : Dict ) -> Optional[int]: """simple docstring""" a = self.prepare_config_and_inputs() a , a , a = config_and_inputs a = {"pixel_values": pixel_values, "labels": labels} return config, inputs_dict @require_torch class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ): _UpperCAmelCase = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) _UpperCAmelCase = ( {'''feature-extraction''': ConvNextVaModel, '''image-classification''': ConvNextVaForImageClassification} if is_torch_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False def A ( self : List[str] ) -> List[Any]: """simple docstring""" a = ConvNextVaModelTester(self ) a = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=37 ) def A ( self : Tuple ) -> Dict: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A ( self : List[Any] ) -> List[Any]: """simple docstring""" return @unittest.skip(reason="ConvNextV2 does not use inputs_embeds" ) def A ( self : List[Any] ) -> List[Any]: """simple docstring""" pass @unittest.skip(reason="ConvNextV2 does not support input and output embeddings" ) def A ( self : int ) -> List[Any]: """simple docstring""" pass @unittest.skip(reason="ConvNextV2 does not use feedforward chunking" ) def A ( self : Optional[int] ) -> Dict: """simple docstring""" pass def A ( self : List[str] ) -> List[str]: """simple docstring""" if not self.model_tester.is_training: return for model_class in self.all_model_classes: a , a = self.model_tester.prepare_config_and_inputs_with_labels() a = True if model_class.__name__ in [ *get_values(__lowerCAmelCase ), *get_values(__lowerCAmelCase ), ]: continue a = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() a = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , 
return_labels=__lowerCAmelCase ) a = model(**__lowerCAmelCase ).loss loss.backward() def A ( self : Optional[int] ) -> List[str]: """simple docstring""" if not self.model_tester.is_training: return for model_class in self.all_model_classes: a , a = self.model_tester.prepare_config_and_inputs_with_labels() a = False a = True if ( model_class.__name__ in [*get_values(__lowerCAmelCase ), *get_values(__lowerCAmelCase )] or not model_class.supports_gradient_checkpointing ): continue a = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.gradient_checkpointing_enable() model.train() a = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) a = model(**__lowerCAmelCase ).loss loss.backward() def A ( self : List[Any] ) -> Any: """simple docstring""" a , a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = model_class(__lowerCAmelCase ) a = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a = [*signature.parameters.keys()] a = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCAmelCase ) def A ( self : Dict ) -> Dict: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def A ( self : Tuple ) -> List[Any]: """simple docstring""" def check_hidden_states_output(__lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple ): a = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() with torch.no_grad(): a = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) ) a = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states a = self.model_tester.num_stages self.assertEqual(len(__lowerCAmelCase ) , expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) a , a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = True check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] a = True check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def A ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase ) @slow def A ( self : Tuple ) -> List[str]: """simple docstring""" for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a = ConvNextVaModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) def UpperCAmelCase__ ( ): '''simple docstring''' a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class _lowercase ( unittest.TestCase ): @cached_property def A ( self : Optional[int] ) -> str: """simple docstring""" return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None @slow def A ( self : List[str] ) -> Union[str, Any]: """simple docstring""" a = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(__lowerCAmelCase ) a = self.default_image_processor a = 
prepare_img() a = preprocessor(images=__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase ) # forward pass with torch.no_grad(): a = model(**__lowerCAmelCase ) # verify the logits a = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __lowerCAmelCase ) a = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1E-4 ) )
32
0
"""Generate all contiguous character n-grams of a sentence."""


def create_ngram(sentence: str, ngram_size: int) -> list:
    # slide a window of `ngram_size` characters across the sentence
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
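# Usage example for the helper above:
assert create_ngram("I am a sentence", 2)[:3] == ["I ", " a", "am"]
assert create_ngram("abc", 3) == ["abc"]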
713
import copy import os import cva import numpy as np from matplotlib import pyplot as plt class _lowercase : def __init__( self : List[str] ) -> List[str]: """simple docstring""" a = "" a = "" a = [] a = 0 a = 256 a = 0 a = 0 a = 0 a = 0 def A ( self : Optional[Any] , __lowerCAmelCase : Any ) -> int: """simple docstring""" a = cva.imread(__lowerCAmelCase , 0 ) a = copy.deepcopy(self.img ) a , a , a = plt.hist(self.img.ravel() , 256 , [0, 256] , label="x" ) a = np.sum(__lowerCAmelCase ) for i in range(len(__lowerCAmelCase ) ): a = x[i] / self.k self.sk += prk a = (self.L - 1) * self.sk if self.rem != 0: a = int(last % last ) a = int(last + 1 if self.rem >= 0.5 else last ) self.last_list.append(__lowerCAmelCase ) a = int(np.ma.count(self.img ) / self.img[1].size ) a = self.img[1].size for i in range(self.number_of_cols ): for j in range(self.number_of_rows ): a = self.img[j][i] if num != self.last_list[num]: a = self.last_list[num] cva.imwrite("output_data/output.jpg" , self.img ) def A ( self : Any ) -> int: """simple docstring""" plt.hist(self.img.ravel() , 256 , [0, 256] ) def A ( self : Any ) -> int: """simple docstring""" cva.imshow("Output-Image" , self.img ) cva.imshow("Input-Image" , self.original_image ) cva.waitKey(5000 ) cva.destroyAllWindows() if __name__ == "__main__": A_ : List[Any] = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''') A_ : int = ConstantStretch() stretcher.stretch(file_path) stretcher.plot_histogram() stretcher.show_image()
32
0
def min_path_sum(grid: list) -> int:
    """Return the minimum path sum from top-left to bottom-right, moving only right or down."""
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row


if __name__ == "__main__":
    import doctest

    doctest.testmod()
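# Worked example for the helper above: in the classic 3x3 grid the cheapest
# right/down path is 1 -> 3 -> 1 -> 1 -> 1 = 7.
assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7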
714
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
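# NOTE: a usage sketch for the pipeline above, assuming the public
# "google/ncsnpp-church-256" checkpoint; 2000 steps is slow without a GPU.
from diffusers import ScoreSdeVePipeline

pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
image = pipe(num_inference_steps=2000).images[0]
image.save("sde_ve_sample.png")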
32
0
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
715
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def get_week_day(year: int, month: int, day: int) -> str:
    """Return the week-day name of a given date, via Conway's Doomsday algorithm."""
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = ((centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
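# Spot checks for the function above: 2000-01-01 was a Saturday, and
# 1900-02-28 (a doomsday date in a non-leap century year) was a Wednesday.
assert get_week_day(2000, 1, 1) == "Saturday"
assert get_week_day(1900, 2, 28) == "Wednesday"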
32
0
from math import sqrt

import numpy as np
from sympy import symbols

# Speed of light (m/s)
c = 299792458

# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    """Ratio of the velocity to the speed of light."""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    """Lorentz factor for a given velocity."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """Lorentz boost matrix along the x-axis."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray | None = None) -> np.ndarray:
    """Apply the boost to an event four-vector (symbolic by default)."""
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29979245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
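# Numeric spot check for the functions above: at half the speed of light,
# beta is 0.5 and gamma is 1 / sqrt(1 - 0.25) ~= 1.1547.
print(f"beta(c/2)  = {beta(c / 2):.4f}")   # 0.5000
print(f"gamma(c/2) = {gamma(c / 2):.4f}")  # 1.1547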
716
import logging import os import sys from dataclasses import dataclass, field from importlib import import_module from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch import nn from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process A_ : int = logging.getLogger(__name__) @dataclass class _lowercase : _UpperCAmelCase = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) _UpperCAmelCase = field( default=UpperCAmelCase__, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) _UpperCAmelCase = field( default='''NER''', metadata={'''help''': '''Task type to fine tune in training (e.g. NER, POS, etc)'''} ) _UpperCAmelCase = field( default=UpperCAmelCase__, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) _UpperCAmelCase = field(default=UpperCAmelCase__, metadata={'''help''': '''Set this flag to use fast tokenization.'''} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. _UpperCAmelCase = field( default=UpperCAmelCase__, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, ) @dataclass class _lowercase : _UpperCAmelCase = field( metadata={'''help''': '''The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'''} ) _UpperCAmelCase = field( default=UpperCAmelCase__, metadata={'''help''': '''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'''}, ) _UpperCAmelCase = field( default=128, metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) }, ) _UpperCAmelCase = field( default=UpperCAmelCase__, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) def UpperCAmelCase__ ( ): '''simple docstring''' a = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. a , a , a = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: a , a , a = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use""" " --overwrite_output_dir to overcome." ) a = import_module("tasks" ) try: a = getattr(UpperCAmelCase__ , model_args.task_type ) a = token_classification_task_clazz() except AttributeError: raise ValueError( F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. 
""" F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("Training/evaluation parameters %s" , UpperCAmelCase__ ) # Set seed set_seed(training_args.seed ) # Prepare CONLL-2003 task a = token_classification_task.get_labels(data_args.labels ) a = dict(enumerate(UpperCAmelCase__ ) ) a = len(UpperCAmelCase__ ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. a = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCAmelCase__ , idalabel=UpperCAmelCase__ , labelaid={label: i for i, label in enumerate(UpperCAmelCase__ )} , cache_dir=model_args.cache_dir , ) a = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , ) a = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=UpperCAmelCase__ , cache_dir=model_args.cache_dir , ) # Get datasets a = ( TokenClassificationDataset( token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) a = ( TokenClassificationDataset( token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def align_predictions(UpperCAmelCase__ :np.ndarray , UpperCAmelCase__ :np.ndarray ) -> Tuple[List[int], List[int]]: a = np.argmax(UpperCAmelCase__ , axis=2 ) a , a = preds.shape a = [[] for _ in range(UpperCAmelCase__ )] a = [[] for _ in range(UpperCAmelCase__ )] for i in range(UpperCAmelCase__ ): for j in range(UpperCAmelCase__ ): if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index: out_label_list[i].append(label_map[label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) return preds_list, out_label_list def compute_metrics(UpperCAmelCase__ :EvalPrediction ) -> Dict: a , a = align_predictions(p.predictions , p.label_ids ) return { "accuracy_score": accuracy_score(UpperCAmelCase__ , UpperCAmelCase__ ), "precision": precision_score(UpperCAmelCase__ , UpperCAmelCase__ ), "recall": recall_score(UpperCAmelCase__ , UpperCAmelCase__ ), "f1": fa_score(UpperCAmelCase__ , UpperCAmelCase__ ), } # Data collator a 
= DataCollatorWithPadding(UpperCAmelCase__ , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer a = Trainer( model=UpperCAmelCase__ , args=UpperCAmelCase__ , train_dataset=UpperCAmelCase__ , eval_dataset=UpperCAmelCase__ , compute_metrics=UpperCAmelCase__ , data_collator=UpperCAmelCase__ , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_process_zero(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation a = {} if training_args.do_eval: logger.info("*** Evaluate ***" ) a = trainer.evaluate() a = os.path.join(training_args.output_dir , "eval_results.txt" ) if trainer.is_world_process_zero(): with open(UpperCAmelCase__ , "w" ) as writer: logger.info("***** Eval results *****" ) for key, value in result.items(): logger.info(" %s = %s" , UpperCAmelCase__ , UpperCAmelCase__ ) writer.write("%s = %s\n" % (key, value) ) results.update(UpperCAmelCase__ ) # Predict if training_args.do_predict: a = TokenClassificationDataset( token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , ) a , a , a = trainer.predict(UpperCAmelCase__ ) a , a = align_predictions(UpperCAmelCase__ , UpperCAmelCase__ ) a = os.path.join(training_args.output_dir , "test_results.txt" ) if trainer.is_world_process_zero(): with open(UpperCAmelCase__ , "w" ) as writer: for key, value in metrics.items(): logger.info(" %s = %s" , UpperCAmelCase__ , UpperCAmelCase__ ) writer.write("%s = %s\n" % (key, value) ) # Save predictions a = os.path.join(training_args.output_dir , "test_predictions.txt" ) if trainer.is_world_process_zero(): with open(UpperCAmelCase__ , "w" ) as writer: with open(os.path.join(data_args.data_dir , "test.txt" ) , "r" ) as f: token_classification_task.write_predictions_to_file(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) return results def UpperCAmelCase__ ( UpperCAmelCase__ :Tuple ): '''simple docstring''' main() if __name__ == "__main__": main()
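# NOTE: a toy sketch of the filtering `align_predictions` performs above — positions
# whose gold label equals the CrossEntropyLoss ignore index (-100) are dropped before
# seqeval scoring. The label names and arrays are illustrative.
import numpy as np

label_map = {0: "O", 1: "B-PER"}
ignore_index = -100  # nn.CrossEntropyLoss().ignore_index

predictions = np.array([[[0.9, 0.1], [0.2, 0.8], [0.5, 0.5]]])  # (batch, seq_len, num_labels)
label_ids = np.array([[0, 1, ignore_index]])  # last slot is a padding/sub-token position

preds = np.argmax(predictions, axis=2)
preds_list = [
    [label_map[p] for p, l in zip(row_p, row_l) if l != ignore_index]
    for row_p, row_l in zip(preds, label_ids)
]
assert preds_list == [["O", "B-PER"]]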
32
0
import inspect import unittest from transformers import ViTHybridConfig from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class _lowercase : def __init__( self : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int]=13 , __lowerCAmelCase : int=64 , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Dict=3 , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Union[str, Any]=32 , __lowerCAmelCase : List[Any]=5 , __lowerCAmelCase : List[str]=4 , __lowerCAmelCase : List[Any]=37 , __lowerCAmelCase : List[Any]="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : List[Any]=10 , __lowerCAmelCase : Union[str, Any]=0.0_2 , __lowerCAmelCase : Optional[Any]=[1, 16, 4, 4] , __lowerCAmelCase : Tuple=None , ) -> int: """simple docstring""" a = parent a = batch_size a = image_size a = patch_size a = num_channels a = is_training a = use_labels a = hidden_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = type_sequence_label_size a = initializer_range a = scope a = backbone_featmap_shape # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) # the number of patches is based on the feature map of the backbone, which by default uses an output stride # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size a = (self.image_size // 32) ** 2 a = num_patches + 1 def A ( self : Tuple ) -> Optional[int]: """simple docstring""" a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a = None if self.use_labels: a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a = self.get_config() return config, pixel_values, labels def A ( self : Any ) -> List[str]: """simple docstring""" a = { "global_padding": "same", "layer_type": "bottleneck", "depths": [3, 4, 9], "out_features": ["stage1", "stage2", "stage3"], "embedding_dynamic_padding": True, "hidden_sizes": [4, 8, 16, 32], "num_groups": 2, } return ViTHybridConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=__lowerCAmelCase , ) def A ( self : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[Any] ) -> Optional[Any]: """simple docstring""" a = 
ViTHybridModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A ( self : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] ) -> Optional[int]: """simple docstring""" a = self.type_sequence_label_size a = ViTHybridForImageClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def A ( self : Dict ) -> Tuple: """simple docstring""" a = self.prepare_config_and_inputs() a , a , a = config_and_inputs a = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ): _UpperCAmelCase = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else () _UpperCAmelCase = ( {'''feature-extraction''': ViTHybridModel, '''image-classification''': ViTHybridForImageClassification} if is_torch_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False def A ( self : Union[str, Any] ) -> int: """simple docstring""" a = ViTHybridModelTester(self ) a = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=37 ) def A ( self : str ) -> Optional[int]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="ViT does not use inputs_embeds" ) def A ( self : str ) -> Any: """simple docstring""" pass def A ( self : Dict ) -> Dict: """simple docstring""" a , a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = model_class(__lowerCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) a = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCAmelCase , nn.Linear ) ) def A ( self : Tuple ) -> str: """simple docstring""" a , a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = model_class(__lowerCAmelCase ) a = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a = [*signature.parameters.keys()] a = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCAmelCase ) def A ( self : str ) -> Tuple: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def A ( self : List[Any] ) -> str: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase ) def A ( self : Optional[Any] ) -> List[Any]: """simple docstring""" a , a = self.model_tester.prepare_config_and_inputs_for_common() a = _config_zero_init(__lowerCAmelCase ) for model_class in self.all_model_classes: a = model_class(config=__lowerCAmelCase ) # Skip the check for the backbone for name, module in model.named_modules(): if module.__class__.__name__ == "ViTHybridPatchEmbeddings": a = [f"""{name}.{key}""" for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not 
properly initialized""" , ) @slow def A ( self : Tuple ) -> Union[str, Any]: """simple docstring""" for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a = ViTHybridModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) def UpperCAmelCase__ ( ): a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class _lowercase ( unittest.TestCase ): @cached_property def A ( self : Optional[int] ) -> str: """simple docstring""" return ( ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def A ( self : str ) -> List[str]: """simple docstring""" a = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( __lowerCAmelCase ) a = self.default_image_processor a = prepare_img() a = image_processor(images=__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase ) # forward pass with torch.no_grad(): a = model(**__lowerCAmelCase ) # verify the logits a = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __lowerCAmelCase ) a = torch.tensor([-1.9_0_9_0, -0.4_9_9_3, -0.2_3_8_9] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1E-4 ) ) @slow @require_accelerate def A ( self : int ) -> int: """simple docstring""" a = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384" ) a = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384" , device_map="auto" ) a = prepare_img() a = image_processor(images=__lowerCAmelCase , return_tensors="pt" ) a = model(**__lowerCAmelCase ) a = outputs.logits # model predicts one of the 1000 ImageNet classes a = logits.argmax(-1 ).item() self.assertTrue(model.config.idalabel[predicted_class_idx] , "tabby, tabby cat" )
717
from ...configuration_utils import PretrainedConfig from ...utils import logging A_ : str = logging.get_logger(__name__) A_ : List[Any] = { '''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''', '''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''', '''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''', '''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''', '''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''', '''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''', '''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''', '''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''', '''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''', '''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''', } class _lowercase ( UpperCAmelCase__ ): _UpperCAmelCase = '''rwkv''' _UpperCAmelCase = {'''max_position_embeddings''': '''context_length'''} def __init__( self : List[str] , __lowerCAmelCase : Union[str, Any]=5_0277 , __lowerCAmelCase : str=1024 , __lowerCAmelCase : Union[str, Any]=4096 , __lowerCAmelCase : Optional[int]=32 , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : List[Any]=1E-5 , __lowerCAmelCase : Union[str, Any]=0 , __lowerCAmelCase : Optional[Any]=0 , __lowerCAmelCase : Dict=6 , __lowerCAmelCase : int=False , __lowerCAmelCase : Tuple=True , **__lowerCAmelCase : List[str] , ) -> List[Any]: """simple docstring""" a = vocab_size a = context_length a = hidden_size a = num_hidden_layers a = attention_hidden_size if attention_hidden_size is not None else hidden_size a = intermediate_size if intermediate_size is not None else 4 * hidden_size a = layer_norm_epsilon a = rescale_every a = use_cache a = bos_token_id a = eos_token_id super().__init__( tie_word_embeddings=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
32
0
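A quick way to see the fallback logic in the config row above is to instantiate the upstream class it was derived from. A minimal sketch, assuming the `transformers` package (which ships `RwkvConfig`) is available; the chosen sizes are arbitrary:

from transformers import RwkvConfig

config = RwkvConfig(hidden_size=512, num_hidden_layers=8)
# Per the __init__ above: attention_hidden_size falls back to hidden_size,
# and intermediate_size to 4 * hidden_size, when they are not given.
assert config.attention_hidden_size == 512
assert config.intermediate_size == 2048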
A_ : List[Any] = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/''' def UpperCAmelCase__ ( UpperCAmelCase__ :bytes ): '''simple docstring''' if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): a = F"""a bytes-like object is required, not '{data.__class__.__name__}'""" raise TypeError(UpperCAmelCase__ ) a = "".join(bin(UpperCAmelCase__ )[2:].zfill(8 ) for byte in data ) a = len(UpperCAmelCase__ ) % 6 != 0 if padding_needed: # The padding that will be added later a = b"=" * ((6 - len(UpperCAmelCase__ ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(UpperCAmelCase__ ) % 6) else: a = b"" # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(UpperCAmelCase__ ) , 6 ) ).encode() + padding ) def UpperCAmelCase__ ( UpperCAmelCase__ :str ): '''simple docstring''' if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): a = ( "argument should be a bytes-like object or ASCII string, " F"""not '{encoded_data.__class__.__name__}'""" ) raise TypeError(UpperCAmelCase__ ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): try: a = encoded_data.decode("utf-8" ) except UnicodeDecodeError: raise ValueError("base64 encoded data should only contain ASCII characters" ) a = encoded_data.count("=" ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(UpperCAmelCase__ ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one a = encoded_data[:-padding] a = "".join( bin(B64_CHARSET.index(UpperCAmelCase__ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: a = "".join( bin(B64_CHARSET.index(UpperCAmelCase__ ) )[2:].zfill(6 ) for char in encoded_data ) a = [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(UpperCAmelCase__ ) , 8 ) ] return bytes(UpperCAmelCase__ ) if __name__ == "__main__": import doctest doctest.testmod()
718
from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging A_ : List[str] = logging.get_logger(__name__) class _lowercase ( UpperCAmelCase__ ): _UpperCAmelCase = ['''audio_values''', '''audio_mask'''] def __init__( self : List[Any] , __lowerCAmelCase : Dict=2048 , __lowerCAmelCase : List[Any]=1 , __lowerCAmelCase : Dict=[16, 16] , __lowerCAmelCase : str=128 , __lowerCAmelCase : Optional[int]=4_4100 , __lowerCAmelCase : int=86 , __lowerCAmelCase : Optional[Any]=2048 , __lowerCAmelCase : str=0.0 , **__lowerCAmelCase : Optional[int] , ) -> Union[str, Any]: """simple docstring""" super().__init__( feature_size=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , padding_value=__lowerCAmelCase , **__lowerCAmelCase , ) a = spectrogram_length a = num_channels a = patch_size a = feature_size // self.patch_size[1] a = n_fft a = sampling_rate // hop_length_to_sampling_rate a = sampling_rate a = padding_value a = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__lowerCAmelCase , min_frequency=0.0 , max_frequency=2_2_0_5_0.0 , sampling_rate=__lowerCAmelCase , norm="slaney" , mel_scale="slaney" , ).T def A ( self : List[str] , __lowerCAmelCase : np.array ) -> np.ndarray: """simple docstring""" a = spectrogram( __lowerCAmelCase , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="dB" , db_range=8_0.0 , ) a = log_spec[:, :-1] a = log_spec - 2_0.0 a = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self : Union[str, Any] , __lowerCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , __lowerCAmelCase : Optional[bool] = True , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , **__lowerCAmelCase : Optional[int] , ) -> BatchFeature: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( "This feature extractor is set to support sampling rate" f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled""" f""" with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." 
) a = isinstance(__lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) a = is_batched_numpy or ( isinstance(__lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: a = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray ): a = np.asarray(__lowerCAmelCase , dtype=np.floataa ) elif isinstance(__lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): a = raw_speech.astype(np.floataa ) # always return batch if not is_batched: a = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis a = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , __lowerCAmelCase ): a = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask a = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: a = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] a = np.array(__lowerCAmelCase ).astype(np.floataa ) # convert into correct format for padding a = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch a = np.ones([len(__lowerCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) a = padded_audio_features * self.padding_value for i in range(len(__lowerCAmelCase ) ): a = audio_features[i] a = feature # return as BatchFeature if return_attention_mask: a = {"audio_values": padded_audio_features, "audio_mask": audio_mask} else: a = {"audio_values": padded_audio_features} a = BatchFeature(data=__lowerCAmelCase , tensor_type=__lowerCAmelCase ) return encoded_inputs
32
0
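The hand-rolled Base64 encoder/decoder in the row above can be cross-checked against the standard library, which uses the same alphabet and '=' padding. A minimal round-trip sketch (the sample bytes are arbitrary):

import base64

data = b"Algorithms are fun!"
encoded = base64.b64encode(data)
assert base64.b64decode(encoded) == data
print(encoded.decode("ascii"))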
from ...configuration_utils import PretrainedConfig from ...utils import logging A_ : List[Any] = logging.get_logger(__name__) A_ : Any = { '''microsoft/biogpt''': '''https://huggingface.co/microsoft/biogpt/resolve/main/config.json''', # See all BioGPT models at https://huggingface.co/models?filter=biogpt } class _lowercase ( UpperCAmelCase__ ): _UpperCAmelCase = '''biogpt''' def __init__( self : List[str] , __lowerCAmelCase : Optional[int]=4_2384 , __lowerCAmelCase : List[Any]=1024 , __lowerCAmelCase : Union[str, Any]=24 , __lowerCAmelCase : int=16 , __lowerCAmelCase : List[Any]=4096 , __lowerCAmelCase : List[str]="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : Optional[int]=1024 , __lowerCAmelCase : List[str]=0.0_2 , __lowerCAmelCase : Tuple=1E-12 , __lowerCAmelCase : Any=True , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Any=0.0 , __lowerCAmelCase : int=0.0 , __lowerCAmelCase : List[Any]=1 , __lowerCAmelCase : Dict=0 , __lowerCAmelCase : Any=2 , **__lowerCAmelCase : int , ) -> Optional[int]: """simple docstring""" a = vocab_size a = max_position_embeddings a = hidden_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = initializer_range a = layer_norm_eps a = scale_embedding a = use_cache a = layerdrop a = activation_dropout super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
719
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class _lowercase : def __init__( self : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : int=10 , __lowerCAmelCase : Any=3 , __lowerCAmelCase : Optional[int]=32 * 4 , __lowerCAmelCase : Dict=32 * 6 , __lowerCAmelCase : str=4 , __lowerCAmelCase : Dict=32 , ) -> Any: """simple docstring""" a = parent a = batch_size a = is_training a = use_auxiliary_loss a = num_queries a = num_channels a = min_size a = max_size a = num_labels a = mask_feature_size def A ( self : Union[str, Any] ) -> Dict: """simple docstring""" a = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __lowerCAmelCase ) a = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowerCAmelCase ) a = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowerCAmelCase ) > 0.5 ).float() a = (torch.rand((self.batch_size, self.num_labels) , device=__lowerCAmelCase ) > 0.5).long() a = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def A ( self : str ) -> Any: """simple docstring""" return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def A ( self : Union[str, Any] ) -> Any: """simple docstring""" a , a , a , a , a = self.prepare_config_and_inputs() a = {"pixel_values": pixel_values, "pixel_mask": pixel_mask} return config, inputs_dict def A ( self : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Dict ) -> str: """simple docstring""" a = output.encoder_hidden_states a = output.pixel_decoder_hidden_states a = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowerCAmelCase ) , config.decoder_config.decoder_layers ) def A ( self : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str]=False ) -> Tuple: """simple docstring""" with torch.no_grad(): a = MaskFormerModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase ) a = model(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) # the correct shape of 
output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(__lowerCAmelCase , __lowerCAmelCase ) def A ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] ) -> Optional[int]: """simple docstring""" a = MaskFormerForInstanceSegmentation(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() def comm_check_on_output(__lowerCAmelCase : Tuple ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): a = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase ) a = model(__lowerCAmelCase ) comm_check_on_output(__lowerCAmelCase ) a = model( pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ) comm_check_on_output(__lowerCAmelCase ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ): _UpperCAmelCase = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () _UpperCAmelCase = ( {'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False def A ( self : List[str] ) -> List[Any]: """simple docstring""" a = MaskFormerModelTester(self ) a = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase ) def A ( self : Any ) -> List[str]: """simple docstring""" self.config_tester.run_common_tests() def A ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" a , a = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) def A ( self : int ) -> int: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowerCAmelCase ) @unittest.skip(reason="MaskFormer does not use inputs_embeds" ) def A ( self : List[Any] ) -> Optional[Any]: """simple docstring""" pass @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" ) def A ( self : str ) -> Union[str, Any]: """simple docstring""" pass @unittest.skip(reason="MaskFormer is not 
a generative model" ) def A ( self : Tuple ) -> Optional[Any]: """simple docstring""" pass @unittest.skip(reason="MaskFormer does not use token embeddings" ) def A ( self : Tuple ) -> Optional[Any]: """simple docstring""" pass @require_torch_multi_gpu @unittest.skip( reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def A ( self : Optional[int] ) -> List[str]: """simple docstring""" pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def A ( self : List[str] ) -> Any: """simple docstring""" pass def A ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" a , a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = model_class(__lowerCAmelCase ) a = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a = [*signature.parameters.keys()] a = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCAmelCase ) @slow def A ( self : Tuple ) -> List[Any]: """simple docstring""" for model_name in ["facebook/maskformer-swin-small-coco"]: a = MaskFormerModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) def A ( self : str ) -> Dict: """simple docstring""" a = (self.model_tester.min_size,) * 2 a = { "pixel_values": torch.randn((2, 3, *size) , device=__lowerCAmelCase ), "mask_labels": torch.randn((2, 10, *size) , device=__lowerCAmelCase ), "class_labels": torch.zeros(2 , 10 , device=__lowerCAmelCase ).long(), } a = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowerCAmelCase ) a = model(**__lowerCAmelCase ) self.assertTrue(outputs.loss is not None ) def A ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" a , a = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) def A ( self : List[str] ) -> Any: """simple docstring""" a , a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = model_class(__lowerCAmelCase ).to(__lowerCAmelCase ) a = model(**__lowerCAmelCase , output_attentions=__lowerCAmelCase ) self.assertTrue(outputs.attentions is not None ) def A ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss a = self.all_model_classes[1] a , a , a , a , a = self.model_tester.prepare_config_and_inputs() a = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() a = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ).loss loss.backward() def A ( self : List[str] ) -> Union[str, Any]: """simple docstring""" a = self.all_model_classes[1] a , a , a , a , a = self.model_tester.prepare_config_and_inputs() a = True a = True a = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() a = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ) a = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() a = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't a = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() a = outputs.attentions[0] attentions.retain_grad() 
outputs.loss.backward(retain_graph=__lowerCAmelCase ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) A_ : int = 1E-4 def UpperCAmelCase__ ( ): '''simple docstring''' a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_vision @slow class _lowercase ( unittest.TestCase ): @cached_property def A ( self : int ) -> Optional[int]: """simple docstring""" return ( MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" ) if is_vision_available() else None ) def A ( self : List[Any] ) -> Optional[Any]: """simple docstring""" a = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(__lowerCAmelCase ) a = self.default_image_processor a = prepare_img() a = image_processor(__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase ) a = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) ) with torch.no_grad(): a = model(**__lowerCAmelCase ) a = torch.tensor( [[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) a = torch.tensor( [[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) a = torch.tensor( [[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def A ( self : str ) -> Union[str, Any]: """simple docstring""" a = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" ) .to(__lowerCAmelCase ) .eval() ) a = self.default_image_processor a = prepare_img() a = image_processor(__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase ) a = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) ) with torch.no_grad(): a = model(**__lowerCAmelCase ) # masks_queries_logits a = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) a = [ [-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3], [-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5], [-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2], ] a = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) # class_queries_logits a = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) a = torch.tensor( [ [1.65_12E00, -5.25_72E00, -3.35_19E00], [3.61_69E-02, -5.90_25E00, -2.93_13E00], [1.07_66E-04, -7.76_30E00, 
-5.12_63E00], ] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def A ( self : List[Any] ) -> Any: """simple docstring""" a = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" ) .to(__lowerCAmelCase ) .eval() ) a = self.default_image_processor a = prepare_img() a = image_processor(__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase ) a = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) ) with torch.no_grad(): a = model(**__lowerCAmelCase ) # masks_queries_logits a = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) a = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]] a = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) # class_queries_logits a = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) a = torch.tensor( [[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def A ( self : int ) -> Any: """simple docstring""" a = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" ) .to(__lowerCAmelCase ) .eval() ) a = self.default_image_processor a = image_processor( [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="pt" , ) a = inputs["pixel_values"].to(__lowerCAmelCase ) a = [el.to(__lowerCAmelCase ) for el in inputs["mask_labels"]] a = [el.to(__lowerCAmelCase ) for el in inputs["class_labels"]] with torch.no_grad(): a = model(**__lowerCAmelCase ) self.assertTrue(outputs.loss is not None )
32
0
import numpy as np from matplotlib import pyplot as plt from sklearn.datasets import load_iris from sklearn.metrics import ConfusionMatrixDisplay from sklearn.model_selection import train_test_split from xgboost import XGBClassifier def UpperCAmelCase__ ( UpperCAmelCase__ :dict ): '''simple docstring''' return (data["data"], data["target"]) def UpperCAmelCase__ ( UpperCAmelCase__ :np.ndarray , UpperCAmelCase__ :np.ndarray ): '''simple docstring''' a = XGBClassifier() classifier.fit(UpperCAmelCase__ , UpperCAmelCase__ ) return classifier def UpperCAmelCase__ ( ): '''simple docstring''' a = load_iris() a , a = data_handling(UpperCAmelCase__ ) a , a , a , a = train_test_split( UpperCAmelCase__ , UpperCAmelCase__ , test_size=0.25 ) a = iris["target_names"] # Create an XGBoost Classifier from the training data a = xgboost(UpperCAmelCase__ , UpperCAmelCase__ ) # Display the confusion matrix of the classifier with both training and test sets ConfusionMatrixDisplay.from_estimator( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , display_labels=UpperCAmelCase__ , cmap="Blues" , normalize="true" , ) plt.title("Normalized Confusion Matrix - IRIS Dataset" ) plt.show() if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
720
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class _lowercase ( unittest.TestCase ): def A ( self : Union[str, Any] ) -> int: """simple docstring""" a = [[1, 2, 4], [1, 2, 3, 4]] a = DisjunctiveConstraint(__lowerCAmelCase ) self.assertTrue(isinstance(dc.token_ids , __lowerCAmelCase ) ) with self.assertRaises(__lowerCAmelCase ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(__lowerCAmelCase ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def A ( self : Tuple ) -> Dict: """simple docstring""" a = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(__lowerCAmelCase ): DisjunctiveConstraint(__lowerCAmelCase ) # fails here def A ( self : int ) -> Any: """simple docstring""" a = [[1, 2, 3], [1, 2, 4]] a = DisjunctiveConstraint(__lowerCAmelCase ) a , a , a = dc.update(1 ) a = stepped is True and completed is False and reset is False self.assertTrue(__lowerCAmelCase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) a , a , a = dc.update(2 ) a = stepped is True and completed is False and reset is False self.assertTrue(__lowerCAmelCase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) a , a , a = dc.update(3 ) a = stepped is True and completed is True and reset is False self.assertTrue(__lowerCAmelCase ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def A ( self : List[Any] ) -> List[Any]: """simple docstring""" a = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] a = DisjunctiveConstraint(__lowerCAmelCase ) a , a , a = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) a , a , a = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) a , a , a = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) a , a , a = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() a , a , a = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) a , a , a = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) a , a , a = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
32
0
from math import sqrt


def UpperCAmelCase__ ( UpperCAmelCase__ :int ):
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(sqrt(UpperCAmelCase__ ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def UpperCAmelCase__ ( UpperCAmelCase__ :int = 1_00_01 ):
    '''simple docstring'''
    a = 0
    a = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(UpperCAmelCase__ ):
            count += 1
    while count != nth:
        number += 2
        if is_prime(UpperCAmelCase__ ):
            count += 1
    return number


if __name__ == "__main__":
    print(F"""{solution() = }""")
721
from __future__ import annotations


def UpperCAmelCase__ ( UpperCAmelCase__ :int ):
    '''simple docstring'''
    a = str(UpperCAmelCase__ )
    return len(UpperCAmelCase__ ) == 9 and set(UpperCAmelCase__ ) == set("123456789" )


def UpperCAmelCase__ ( ):
    '''simple docstring'''
    for base_num in range(99_99 , 49_99 , -1 ):
        a = 10_00_02 * base_num
        if is_9_pandigital(UpperCAmelCase__ ):
            return candidate
    for base_num in range(3_33 , 99 , -1 ):
        a = 1_00_20_03 * base_num
        if is_9_pandigital(UpperCAmelCase__ ):
            return candidate
    return None


if __name__ == "__main__":
    print(F"""{solution() = }""")
32
0
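The primality test in the row above relies on the fact that every prime greater than 3 has the form 6k +/- 1. A readable sketch of the same idea (names here are illustrative; the row's own identifiers were rewritten by the corpus transformation):

from math import isqrt

def is_prime(number: int) -> bool:
    # Trial division by 2, 3, then candidates of the form 6k - 1 and 6k + 1.
    if 1 < number < 4:
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    return all(number % i != 0 and number % (i + 2) != 0 for i in range(5, isqrt(number) + 1, 6))

def nth_prime(nth: int = 10001) -> int:
    count, number = 0, 1
    while count < nth:
        number += 1
        if is_prime(number):
            count += 1
    return number

assert nth_prime(6) == 13  # primes: 2, 3, 5, 7, 11, 13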
import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin class _lowercase ( unittest.TestCase, UpperCAmelCase__ ): def A ( self : List[Any] ) -> Tuple: """simple docstring""" a = load_tool("text-classification" ) self.tool.setup() a = load_tool("text-classification" , remote=__lowerCAmelCase ) def A ( self : List[Any] ) -> Optional[int]: """simple docstring""" a = self.tool("That's quite cool" , ["positive", "negative"] ) self.assertEqual(__lowerCAmelCase , "positive" ) def A ( self : Any ) -> Dict: """simple docstring""" a = self.remote_tool("That's quite cool" , ["positive", "negative"] ) self.assertEqual(__lowerCAmelCase , "positive" ) def A ( self : int ) -> Optional[int]: """simple docstring""" a = self.tool(text="That's quite cool" , labels=["positive", "negative"] ) self.assertEqual(__lowerCAmelCase , "positive" ) def A ( self : Any ) -> Tuple: """simple docstring""" a = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] ) self.assertEqual(__lowerCAmelCase , "positive" )
700
import os import tempfile import unittest from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter from transformers.testing_utils import slow from transformers.utils import cached_property @unittest.skipUnless(os.path.exists(UpperCAmelCase__ ), '''Tatoeba directory does not exist.''' ) class _lowercase ( unittest.TestCase ): @cached_property def A ( self : List[str] ) -> int: """simple docstring""" a = tempfile.mkdtemp() return TatoebaConverter(save_dir=__lowerCAmelCase ) @slow def A ( self : Optional[int] ) -> List[str]: """simple docstring""" self.resolver.convert_models(["heb-eng"] ) @slow def A ( self : Dict ) -> Any: """simple docstring""" a , a = self.resolver.write_model_card("opus-mt-he-en" , dry_run=__lowerCAmelCase ) assert mmeta["long_pair"] == "heb-eng"
32
0
from numpy import exp, pi, sqrt


def UpperCAmelCase__ ( UpperCAmelCase__ :str , UpperCAmelCase__ :float = 0.0 , UpperCAmelCase__ :float = 1.0 ):
    '''simple docstring'''
    return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
701
from ...configuration_utils import PretrainedConfig from ...utils import logging A_ : Any = logging.get_logger(__name__) A_ : Optional[int] = { '''SCUT-DLVCLab/lilt-roberta-en-base''': ( '''https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json''' ), } class _lowercase ( UpperCAmelCase__ ): _UpperCAmelCase = '''lilt''' def __init__( self : Union[str, Any] , __lowerCAmelCase : Optional[Any]=3_0522 , __lowerCAmelCase : str=768 , __lowerCAmelCase : Any=12 , __lowerCAmelCase : Optional[Any]=12 , __lowerCAmelCase : List[Any]=3072 , __lowerCAmelCase : Dict="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[str]=512 , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : int=0.0_2 , __lowerCAmelCase : Union[str, Any]=1E-12 , __lowerCAmelCase : Tuple=0 , __lowerCAmelCase : List[Any]="absolute" , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : Dict=1024 , **__lowerCAmelCase : Dict , ) -> int: """simple docstring""" super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase ) a = vocab_size a = hidden_size a = num_hidden_layers a = num_attention_heads a = hidden_act a = intermediate_size a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = type_vocab_size a = initializer_range a = layer_norm_eps a = position_embedding_type a = classifier_dropout a = channel_shrink_ratio a = max_ad_position_embeddings
32
0
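Rewritten with conventional names, the density in the row above is the normal PDF. A small numeric check, using only the standard library:

from math import exp, pi, sqrt

def gaussian(x: float, mu: float = 0.0, sigma: float = 1.0) -> float:
    # Probability density of N(mu, sigma^2) at x.
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))

# The standard normal density at its mean is 1 / sqrt(2 * pi) ~ 0.39894.
assert abs(gaussian(0.0) - 0.3989422804014327) < 1e-12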
import argparse import torch from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert from transformers.utils import logging logging.set_verbosity_info() def UpperCAmelCase__ ( UpperCAmelCase__ :Any , UpperCAmelCase__ :Tuple , UpperCAmelCase__ :str ): '''simple docstring''' a = MobileBertConfig.from_json_file(UpperCAmelCase__ ) print(F"""Building PyTorch model from configuration: {config}""" ) a = MobileBertForPreTraining(UpperCAmelCase__ ) # Load weights from tf checkpoint a = load_tf_weights_in_mobilebert(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) # Save pytorch-model print(F"""Save PyTorch model to {pytorch_dump_path}""" ) torch.save(model.state_dict() , UpperCAmelCase__ ) if __name__ == "__main__": A_ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--mobilebert_config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained MobileBERT model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) A_ : Optional[Any] = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
702
import argparse from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta from transformers.utils import logging logging.set_verbosity_info() def UpperCAmelCase__ ( UpperCAmelCase__ :Optional[int] , UpperCAmelCase__ :List[str] , UpperCAmelCase__ :Any ): '''simple docstring''' a = TaConfig.from_json_file(UpperCAmelCase__ ) print(F"""Building PyTorch model from configuration: {config}""" ) a = TaForConditionalGeneration(UpperCAmelCase__ ) # Load weights from tf checkpoint load_tf_weights_in_ta(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) # Save pytorch-model print(F"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(UpperCAmelCase__ ) if __name__ == "__main__": A_ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) A_ : Tuple = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
32
0
from __future__ import annotations def UpperCAmelCase__ ( UpperCAmelCase__ :float , UpperCAmelCase__ :float , UpperCAmelCase__ :float ): '''simple docstring''' if days_between_payments <= 0: raise ValueError("days_between_payments must be > 0" ) if daily_interest_rate < 0: raise ValueError("daily_interest_rate must be >= 0" ) if principal <= 0: raise ValueError("principal must be > 0" ) return principal * daily_interest_rate * days_between_payments def UpperCAmelCase__ ( UpperCAmelCase__ :float , UpperCAmelCase__ :float , UpperCAmelCase__ :float , ): '''simple docstring''' if number_of_compounding_periods <= 0: raise ValueError("number_of_compounding_periods must be > 0" ) if nominal_annual_interest_rate_percentage < 0: raise ValueError("nominal_annual_interest_rate_percentage must be >= 0" ) if principal <= 0: raise ValueError("principal must be > 0" ) return principal * ( (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods - 1 ) def UpperCAmelCase__ ( UpperCAmelCase__ :float , UpperCAmelCase__ :float , UpperCAmelCase__ :float , ): '''simple docstring''' if number_of_years <= 0: raise ValueError("number_of_years must be > 0" ) if nominal_annual_percentage_rate < 0: raise ValueError("nominal_annual_percentage_rate must be >= 0" ) if principal <= 0: raise ValueError("principal must be > 0" ) return compound_interest( UpperCAmelCase__ , nominal_annual_percentage_rate / 3_65 , number_of_years * 3_65 ) if __name__ == "__main__": import doctest doctest.testmod()
703
def UpperCAmelCase__ ( UpperCAmelCase__ :int , UpperCAmelCase__ :int ):
    '''simple docstring'''
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive" )
    a = str(bin(UpperCAmelCase__ ) )[2:]  # remove the leading "0b"
    a = str(bin(UpperCAmelCase__ ) )[2:]  # remove the leading "0b"
    a = max(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) )
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1" ) )
        for char_a, char_b in zip(a_binary.zfill(UpperCAmelCase__ ) , b_binary.zfill(UpperCAmelCase__ ) )
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
32
0
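The interest helpers in the row above reduce to two formulas: simple interest P * r * t and compound interest P * ((1 + r)^n - 1). A worked check with illustrative figures:

def simple_interest(principal: float, daily_rate: float, days: float) -> float:
    return principal * daily_rate * days

def compound_interest(principal: float, rate_per_period: float, periods: float) -> float:
    return principal * ((1 + rate_per_period) ** periods - 1)

# 1000 at 0.5% per day for 60 days earns 300 in simple interest.
assert abs(simple_interest(1000, 0.005, 60) - 300.0) < 1e-9
# 1000 at 10% per period, compounded twice, earns 1000 * (1.1**2 - 1) = 210.
assert abs(compound_interest(1000, 0.10, 2) - 210.0) < 1e-9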
from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class _lowercase ( UpperCAmelCase__ ): _UpperCAmelCase = ['''image_processor''', '''tokenizer'''] _UpperCAmelCase = '''BridgeTowerImageProcessor''' _UpperCAmelCase = ('''RobertaTokenizer''', '''RobertaTokenizerFast''') def __init__( self : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any ) -> Optional[Any]: """simple docstring""" super().__init__(__lowerCAmelCase , __lowerCAmelCase ) def __call__( self : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __lowerCAmelCase : bool = True , __lowerCAmelCase : Union[bool, str, PaddingStrategy] = False , __lowerCAmelCase : Union[bool, str, TruncationStrategy] = None , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : int = 0 , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = True , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , **__lowerCAmelCase : Any , ) -> BatchEncoding: """simple docstring""" a = self.tokenizer( text=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , stride=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_overflowing_tokens=__lowerCAmelCase , return_special_tokens_mask=__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , return_length=__lowerCAmelCase , verbose=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase , ) # add pixel_values + pixel_mask a = self.image_processor( __lowerCAmelCase , return_tensors=__lowerCAmelCase , do_normalize=__lowerCAmelCase , do_center_crop=__lowerCAmelCase , **__lowerCAmelCase ) encoding.update(__lowerCAmelCase ) return encoding def A ( self : Optional[int] , *__lowerCAmelCase : Optional[int] , **__lowerCAmelCase : Optional[int] ) -> Dict: """simple docstring""" return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase ) def A ( self : str , *__lowerCAmelCase : int , **__lowerCAmelCase : Optional[int] ) -> Tuple: """simple docstring""" return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase ) @property def A ( self : int ) -> Dict: """simple docstring""" a = self.tokenizer.model_input_names a = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
704
from __future__ import annotations from collections.abc import Iterable, Iterator from dataclasses import dataclass A_ : List[str] = (3, 9, -11, 0, 7, 5, 1, -1) A_ : Optional[int] = (4, 6, 2, 0, 8, 10, 3, -2) @dataclass class _lowercase : _UpperCAmelCase = 42 _UpperCAmelCase = 42 class _lowercase : def __init__( self : List[Any] , __lowerCAmelCase : Iterable[int] ) -> None: """simple docstring""" a = None for i in sorted(__lowerCAmelCase , reverse=__lowerCAmelCase ): a = Node(__lowerCAmelCase , self.head ) def __iter__( self : Union[str, Any] ) -> Iterator[int]: """simple docstring""" a = self.head while node: yield node.data a = node.next_node def __len__( self : Tuple ) -> int: """simple docstring""" return sum(1 for _ in self ) def __str__( self : Union[str, Any] ) -> str: """simple docstring""" return " -> ".join([str(__lowerCAmelCase ) for node in self] ) def UpperCAmelCase__ ( UpperCAmelCase__ :SortedLinkedList , UpperCAmelCase__ :SortedLinkedList ): '''simple docstring''' return SortedLinkedList(list(UpperCAmelCase__ ) + list(UpperCAmelCase__ ) ) if __name__ == "__main__": import doctest doctest.testmod() A_ : Optional[Any] = SortedLinkedList print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
32
0
def UpperCAmelCase__ ( UpperCAmelCase__ :int ):
    '''simple docstring'''
    if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
        raise ValueError("multiplicative_persistence() only accepts integral values" )
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values" )
    a = 0
    a = str(UpperCAmelCase__ )
    while len(UpperCAmelCase__ ) != 1:
        a = [int(UpperCAmelCase__ ) for i in num_string]
        a = 1
        for i in range(0 , len(UpperCAmelCase__ ) ):
            total *= numbers[i]
        a = str(UpperCAmelCase__ )
        steps += 1
    return steps


def UpperCAmelCase__ ( UpperCAmelCase__ :int ):
    '''simple docstring'''
    if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
        raise ValueError("additive_persistence() only accepts integral values" )
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values" )
    a = 0
    a = str(UpperCAmelCase__ )
    while len(UpperCAmelCase__ ) != 1:
        a = [int(UpperCAmelCase__ ) for i in num_string]
        a = 0
        for i in range(0 , len(UpperCAmelCase__ ) ):
            total += numbers[i]
        a = str(UpperCAmelCase__ )
        steps += 1
    return steps


if __name__ == "__main__":
    import doctest

    doctest.testmod()
705
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
32
0
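Both persistence routines in the row above follow one loop shape: reduce the number via its digits until a single digit remains, counting the steps. A compact sketch of the multiplicative case:

def multiplicative_persistence(num: int) -> int:
    # e.g. 39 -> 3*9 = 27 -> 2*7 = 14 -> 1*4 = 4, so three steps.
    steps = 0
    while num >= 10:
        product = 1
        for digit in str(num):
            product *= int(digit)
        num = product
        steps += 1
    return steps

assert multiplicative_persistence(39) == 3
assert multiplicative_persistence(7) == 0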
def UpperCAmelCase__ ( UpperCAmelCase__ :int = 10_00 ):
    '''simple docstring'''
    a = 3
    a = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        elif a % 15 == 0:
            result -= a
        a += 1
    return result


if __name__ == "__main__":
    print(F"""{solution() = }""")
706
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices A_ : int = logging.get_logger(__name__) A_ : str = { '''microsoft/focalnet-tiny''': '''https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json''', } class _lowercase ( UpperCAmelCase__, UpperCAmelCase__ ): _UpperCAmelCase = '''focalnet''' def __init__( self : int , __lowerCAmelCase : Optional[Any]=224 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : Any=3 , __lowerCAmelCase : Tuple=96 , __lowerCAmelCase : Dict=False , __lowerCAmelCase : Optional[int]=[192, 384, 768, 768] , __lowerCAmelCase : Union[str, Any]=[2, 2, 6, 2] , __lowerCAmelCase : Optional[int]=[2, 2, 2, 2] , __lowerCAmelCase : Union[str, Any]=[3, 3, 3, 3] , __lowerCAmelCase : str="gelu" , __lowerCAmelCase : Any=4.0 , __lowerCAmelCase : Optional[int]=0.0 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : str=False , __lowerCAmelCase : Optional[int]=1E-4 , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : str=False , __lowerCAmelCase : Any=0.0_2 , __lowerCAmelCase : str=1E-5 , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Dict=None , __lowerCAmelCase : str=None , **__lowerCAmelCase : Any , ) -> List[str]: """simple docstring""" super().__init__(**__lowerCAmelCase ) a = image_size a = patch_size a = num_channels a = embed_dim a = use_conv_embed a = hidden_sizes a = depths a = focal_levels a = focal_windows a = hidden_act a = mlp_ratio a = hidden_dropout_prob a = drop_path_rate a = use_layerscale a = layerscale_value a = use_post_layernorm a = use_post_layernorm_in_modulation a = normalize_modulator a = initializer_range a = layer_norm_eps a = encoder_stride a = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )] a , a = get_aligned_output_features_output_indices( out_features=__lowerCAmelCase , out_indices=__lowerCAmelCase , stage_names=self.stage_names )
32
0
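One detail worth noting in the Project Euler solution above: the `elif ... % 15` branch can never run, because any multiple of 15 already satisfies the `% 3 == 0` test, so nothing is ever subtracted. The whole computation collapses to a one-liner:

def solution(n: int = 1000) -> int:
    # Sum of all natural numbers below n that are multiples of 3 or 5.
    return sum(x for x in range(n) if x % 3 == 0 or x % 5 == 0)

assert solution(10) == 23       # 3 + 5 + 6 + 9
assert solution() == 233168     # the known answer for n = 1000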
import os import random import sys from . import cryptomath_module as cryptoMath # noqa: N812 from . import rabin_miller as rabinMiller # noqa: N812 def UpperCAmelCase__ ( ) -> List[Any]: '''simple docstring''' print("Making key files..." ) make_key_files("rsa" , 10_24 ) print("Key files generation successful." ) def UpperCAmelCase__ ( UpperCAmelCase__ :int ) -> Dict: '''simple docstring''' print("Generating prime p..." ) a = rabinMiller.generate_large_prime(UpperCAmelCase__ ) print("Generating prime q..." ) a = rabinMiller.generate_large_prime(UpperCAmelCase__ ) a = p * q print("Generating e that is relatively prime to (p - 1) * (q - 1)..." ) while True: a = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) ) if cryptoMath.gcd(UpperCAmelCase__ , (p - 1) * (q - 1) ) == 1: break print("Calculating d that is mod inverse of e..." ) a = cryptoMath.find_mod_inverse(UpperCAmelCase__ , (p - 1) * (q - 1) ) a = (n, e) a = (n, d) return (public_key, private_key) def UpperCAmelCase__ ( UpperCAmelCase__ :str , UpperCAmelCase__ :int ) -> str: '''simple docstring''' if os.path.exists(F"""{name}_pubkey.txt""" ) or os.path.exists(F"""{name}_privkey.txt""" ): print("\nWARNING:" ) print( F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n""" "Use a different name or delete these files and re-run this program." ) sys.exit() a , a = generate_key(UpperCAmelCase__ ) print(F"""\nWriting public key to file {name}_pubkey.txt...""" ) with open(F"""{name}_pubkey.txt""" , "w" ) as out_file: out_file.write(F"""{key_size},{public_key[0]},{public_key[1]}""" ) print(F"""Writing private key to file {name}_privkey.txt...""" ) with open(F"""{name}_privkey.txt""" , "w" ) as out_file: out_file.write(F"""{key_size},{private_key[0]},{private_key[1]}""" ) if __name__ == "__main__": main()
707
def UpperCAmelCase__ ( UpperCAmelCase__ :Any ): '''simple docstring''' if not head: return True # split the list to two parts a , a = head.next, head while fast and fast.next: a = fast.next.next a = slow.next a = slow.next a = None # Don't forget here! But forget still works! # reverse the second part a = None while second: a = second.next a = node a = second a = nxt # compare two parts # second part has the same or one less node while node: if node.val != head.val: return False a = node.next a = head.next return True def UpperCAmelCase__ ( UpperCAmelCase__ :str ): '''simple docstring''' if not head or not head.next: return True # 1. Get the midpoint (slow) a = a = a = head while fast and fast.next: a , a = fast.next.next, slow.next # 2. Push the second half into the stack a = [slow.val] while slow.next: a = slow.next stack.append(slow.val ) # 3. Comparison while stack: if stack.pop() != cur.val: return False a = cur.next return True def UpperCAmelCase__ ( UpperCAmelCase__ :Any ): '''simple docstring''' if not head or not head.next: return True a = {} a = 0 while head: if head.val in d: d[head.val].append(UpperCAmelCase__ ) else: a = [pos] a = head.next pos += 1 a = pos - 1 a = 0 for v in d.values(): if len(UpperCAmelCase__ ) % 2 != 0: middle += 1 else: a = 0 for i in range(0 , len(UpperCAmelCase__ ) ): if v[i] + v[len(UpperCAmelCase__ ) - 1 - step] != checksum: return False step += 1 if middle > 1: return False return True
32
0
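The key generator in the row above pairs (n, e) with (n, d) such that e * d == 1 mod (p - 1) * (q - 1). The textbook worked example, small enough to check by hand (illustrative only; real keys are 1024+ bits):

p, q = 61, 53
n = p * q                    # 3233
phi = (p - 1) * (q - 1)      # 3120
e = 17                       # coprime to phi
d = pow(e, -1, phi)          # modular inverse: 2753
message = 65
cipher = pow(message, e, n)  # 2790
assert pow(cipher, d, n) == message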
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging A_ : int = logging.get_logger(__name__) A_ : str = { '''tiiuae/falcon-40b''': '''https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json''', '''tiiuae/falcon-7b''': '''https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json''', } class _lowercase ( UpperCAmelCase__ ): _UpperCAmelCase = '''falcon''' _UpperCAmelCase = ['''past_key_values'''] def __init__( self : int , __lowerCAmelCase : Tuple=6_5024 , __lowerCAmelCase : int=4544 , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Union[str, Any]=71 , __lowerCAmelCase : Any=1E-5 , __lowerCAmelCase : Union[str, Any]=0.0_2 , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Any=0.0 , __lowerCAmelCase : Any=0.0 , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Dict=False , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : int=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : List[str]=False , __lowerCAmelCase : Dict=11 , __lowerCAmelCase : Any=11 , **__lowerCAmelCase : Any , ) -> int: """simple docstring""" a = vocab_size # Backward compatibility with n_embed kwarg a = kwargs.pop("n_embed" , __lowerCAmelCase ) a = hidden_size if n_embed is None else n_embed a = num_hidden_layers a = num_attention_heads a = layer_norm_epsilon a = initializer_range a = use_cache a = hidden_dropout a = attention_dropout a = bos_token_id a = eos_token_id a = num_attention_heads if num_kv_heads is None else num_kv_heads a = alibi a = new_decoder_architecture a = multi_query # Ignored when new_decoder_architecture is True a = parallel_attn a = bias super().__init__(bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase ) @property def A ( self : List[str] ) -> Optional[Any]: """simple docstring""" return self.hidden_size // self.num_attention_heads @property def A ( self : Dict ) -> Tuple: """simple docstring""" return not self.alibi
708
import unittest from transformers import MobileBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertModel, ) class _lowercase : def __init__( self : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any=13 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : int=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : str=99 , __lowerCAmelCase : List[str]=64 , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Dict=5 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Optional[Any]=37 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : List[str]=512 , __lowerCAmelCase : List[Any]=16 , __lowerCAmelCase : Union[str, Any]=2 , __lowerCAmelCase : Optional[Any]=0.0_2 , __lowerCAmelCase : Dict=3 , __lowerCAmelCase : Optional[int]=4 , __lowerCAmelCase : Union[str, Any]=None , ) -> List[str]: """simple docstring""" a = parent a = batch_size a = seq_length a = is_training a = use_input_mask a = use_token_type_ids a = use_labels a = vocab_size a = hidden_size a = embedding_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = type_vocab_size a = type_sequence_label_size a = initializer_range a = num_labels a = num_choices a = scope def A ( self : Optional[int] ) -> Optional[int]: """simple docstring""" a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a = None if self.use_input_mask: a = random_attention_mask([self.batch_size, self.seq_length] ) a = None if self.use_token_type_ids: a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a = None a = None a = None if self.use_labels: a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a = ids_tensor([self.batch_size] , self.num_choices ) a = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A ( self : int ) -> List[str]: """simple docstring""" return MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , ) def A ( self : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : int , __lowerCAmelCase : Dict , 
__lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict ) -> Union[str, Any]: """simple docstring""" a = MobileBertModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) a = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) a = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def A ( self : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Any ) -> str: """simple docstring""" a = MobileBertForMaskedLM(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A ( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] ) -> List[str]: """simple docstring""" a = MobileBertForNextSentencePrediction(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def A ( self : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] ) -> List[Any]: """simple docstring""" a = MobileBertForPreTraining(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , next_sentence_label=__lowerCAmelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def A ( self : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ) -> Any: """simple docstring""" a = MobileBertForQuestionAnswering(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A ( self : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] ) -> Optional[int]: """simple docstring""" a = 
self.num_labels a = MobileBertForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ) -> Optional[Any]: """simple docstring""" a = self.num_labels a = MobileBertForTokenClassification(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A ( self : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] ) -> List[str]: """simple docstring""" a = self.num_choices a = MobileBertForMultipleChoice(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A ( self : List[Any] ) -> Dict: """simple docstring""" a = self.prepare_config_and_inputs() ( ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ) = config_and_inputs a = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ): _UpperCAmelCase = ( ( MobileBertModel, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, ) if is_torch_available() else () ) _UpperCAmelCase = ( { '''feature-extraction''': MobileBertModel, '''fill-mask''': MobileBertForMaskedLM, '''question-answering''': MobileBertForQuestionAnswering, '''text-classification''': MobileBertForSequenceClassification, '''token-classification''': MobileBertForTokenClassification, '''zero-shot''': MobileBertForSequenceClassification, } if is_torch_available() else {} ) _UpperCAmelCase = True def A ( self : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any=False ) -> Any: """simple docstring""" a = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) if return_labels: if model_class in get_values(__lowerCAmelCase ): a = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCAmelCase ) a = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase ) return inputs_dict def A ( self : Optional[int] ) -> List[Any]: """simple docstring""" a = MobileBertModelTester(self ) a = ConfigTester(self , 
config_class=__lowerCAmelCase , hidden_size=37 ) def A ( self : int ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() def A ( self : str ) -> Dict: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*__lowerCAmelCase ) def A ( self : str ) -> str: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*__lowerCAmelCase ) def A ( self : List[str] ) -> Dict: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__lowerCAmelCase ) def A ( self : int ) -> Dict: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__lowerCAmelCase ) def A ( self : List[Any] ) -> int: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*__lowerCAmelCase ) def A ( self : List[Any] ) -> Dict: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*__lowerCAmelCase ) def A ( self : List[Any] ) -> Optional[int]: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__lowerCAmelCase ) def A ( self : int ) -> Tuple: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*__lowerCAmelCase ) def UpperCAmelCase__ ( UpperCAmelCase__ :Dict ): '''simple docstring''' return torch.tensor( UpperCAmelCase__ , dtype=torch.long , device=UpperCAmelCase__ , ) A_ : Dict = 1E-3 @require_torch @require_sentencepiece @require_tokenizers class _lowercase ( unittest.TestCase ): @slow def A ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" a = MobileBertModel.from_pretrained("google/mobilebert-uncased" ).to(__lowerCAmelCase ) a = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] ) with torch.no_grad(): a = model(__lowerCAmelCase )[0] a = torch.Size((1, 9, 512) ) self.assertEqual(output.shape , __lowerCAmelCase ) a = torch.tensor( [ [ [-2.4_73_65_26E07, 8.2_69_16_56E04, 1.6_52_18_38E05], [-5.7_54_17_04E-01, 3.9_05_60_22E00, 4.4_01_15_07E00], [2.6_04_73_59E00, 1.5_67_76_52E00, -1.7_32_41_88E-01], ] ] , device=__lowerCAmelCase , ) # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a # ~1 difference, it's therefore not a good idea to measure using addition. # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE a = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE ) a = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE ) self.assertTrue(lower_bound and upper_bound )
32
0
import numpy as np


def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """ELU activation: identity for positive inputs, alpha * (exp(x) - 1) otherwise."""
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
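# --- Usage sketch: a self-contained check of the formula above (illustrative inputs).
_out = exponential_linear_unit(np.array([2.3, 0.6, -2.0]), alpha=0.3)
assert np.allclose(_out, [2.3, 0.6, 0.3 * (np.exp(-2.0) - 1)])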
709
import re
from pathlib import Path
from unittest import TestCase

import pytest


@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        # flag any non-binary open(...) call that does not pass an explicit encoding
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        # flag print(...) calls that are not commented out or inside a string
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
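# --- Illustrative standalone check of the first pattern above (same regex, fixed inputs).
_regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
assert _regexp.search(' open("data.txt")') is not None  # flagged: no encoding given
assert _regexp.search(' open("data.txt", encoding="utf-8")') is None  # accepted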
32
0
from __future__ import annotations


def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Sort the two halves input_list[low:mid] and input_list[mid:high + 1] into place."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Return a sorted copy of input_list using iterative (bottom-up) merge sort."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
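# --- Usage sketch (illustrative input; length chosen as a power of two, the case
# --- the final-merge step above handles most directly).
_data = [12, 3, 88, 1, 7, 42, 42, 0]
assert iter_merge_sort(_data) == sorted(_data)
assert iter_merge_sort([]) == []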
710
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
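# --- Minimal sketch of the idea behind _LazyModule (an illustration, not the
# --- transformers implementation): a module-level __getattr__ (PEP 562) defers
# --- each submodule import until one of its names is first requested.
import importlib


def __getattr__(name):
    for module_name, exported_names in _import_structure.items():
        if name in exported_names:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")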
32
0
import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) A_ : int = logging.getLogger(__name__) @dataclass class _lowercase : _UpperCAmelCase = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) _UpperCAmelCase = field( default=UpperCAmelCase__, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) _UpperCAmelCase = field( default=UpperCAmelCase__, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) _UpperCAmelCase = field( default=UpperCAmelCase__, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, ) _UpperCAmelCase = field(default=UpperCAmelCase__, metadata={'''help''': '''Whether tp freeze the encoder.'''} ) _UpperCAmelCase = field(default=UpperCAmelCase__, metadata={'''help''': '''Whether to freeze the embeddings.'''} ) @dataclass class _lowercase : _UpperCAmelCase = field( metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} ) _UpperCAmelCase = field( default='''summarization''', metadata={'''help''': '''Task name, summarization (or summarization_{dataset} for pegasus) or translation'''}, ) _UpperCAmelCase = field( default=1_024, metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) }, ) _UpperCAmelCase = field( default=128, metadata={ '''help''': ( '''The maximum total sequence length for target text after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) }, ) _UpperCAmelCase = field( default=142, metadata={ '''help''': ( '''The maximum total sequence length for validation target text after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded. ''' '''This argument is also used to override the ``max_length`` param of ``model.generate``, which is used ''' '''during ``evaluate`` and ``predict``.''' ) }, ) _UpperCAmelCase = field( default=142, metadata={ '''help''': ( '''The maximum total sequence length for test target text after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) }, ) _UpperCAmelCase = field(default=-1, metadata={'''help''': '''# training examples. -1 means use all.'''} ) _UpperCAmelCase = field(default=-1, metadata={'''help''': '''# validation examples. -1 means use all.'''} ) _UpperCAmelCase = field(default=-1, metadata={'''help''': '''# test examples. 
-1 means use all.'''} ) _UpperCAmelCase = field(default=UpperCAmelCase__, metadata={'''help''': '''Source language id for translation.'''} ) _UpperCAmelCase = field(default=UpperCAmelCase__, metadata={'''help''': '''Target language id for translation.'''} ) _UpperCAmelCase = field(default=UpperCAmelCase__, metadata={'''help''': '''# num_beams to use for evaluation.'''} ) _UpperCAmelCase = field( default=UpperCAmelCase__, metadata={'''help''': '''If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'''}, ) def UpperCAmelCase__ ( UpperCAmelCase__ :Optional[int] , UpperCAmelCase__ :List[Any] , UpperCAmelCase__ :Union[str, Any] ): '''simple docstring''' logger.info(F"""***** {split} metrics *****""" ) for key in sorted(metrics.keys() ): logger.info(F""" {key} = {metrics[key]}""" ) save_json(UpperCAmelCase__ , os.path.join(UpperCAmelCase__ , F"""{split}_results.json""" ) ) def UpperCAmelCase__ ( ): '''simple docstring''' a = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. a , a , a = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: a , a , a = parser.parse_args_into_dataclasses() check_output_dir(UpperCAmelCase__ ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info("Training/evaluation parameters %s" , UpperCAmelCase__ ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
a = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) a = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout") for p in extra_model_params: if getattr(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ): assert hasattr(UpperCAmelCase__ , UpperCAmelCase__ ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute""" setattr(UpperCAmelCase__ , UpperCAmelCase__ , getattr(UpperCAmelCase__ , UpperCAmelCase__ ) ) a = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) a = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=UpperCAmelCase__ , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(UpperCAmelCase__ , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: a = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(UpperCAmelCase__ , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): a = tokenizer.lang_code_to_id[data_args.tgt_lang] else: a = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(UpperCAmelCase__ ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) a = SeqaSeqDataset # Get datasets a = ( dataset_class( UpperCAmelCase__ , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , ) if training_args.do_train else None ) a = ( dataset_class( UpperCAmelCase__ , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) a = ( dataset_class( UpperCAmelCase__ , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , ) if training_args.do_predict else None ) # Initialize our Trainer a = ( build_compute_metrics_fn(data_args.task , UpperCAmelCase__ ) if training_args.predict_with_generate else None ) a = SeqaSeqTrainer( model=UpperCAmelCase__ , args=UpperCAmelCase__ , data_args=UpperCAmelCase__ , train_dataset=UpperCAmelCase__ , eval_dataset=UpperCAmelCase__ , data_collator=SeqaSeqDataCollator( UpperCAmelCase__ , UpperCAmelCase__ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , ) a = {} # Training if training_args.do_train: logger.info("*** Train ***" ) a = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) a = train_result.metrics a = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics("train" , UpperCAmelCase__ , 
training_args.output_dir ) all_metrics.update(UpperCAmelCase__ ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***" ) a = trainer.evaluate(metric_key_prefix="val" ) a = data_args.n_val a = round(metrics["val_loss"] , 4 ) if trainer.is_world_process_zero(): handle_metrics("val" , UpperCAmelCase__ , training_args.output_dir ) all_metrics.update(UpperCAmelCase__ ) if training_args.do_predict: logger.info("*** Predict ***" ) a = trainer.predict(test_dataset=UpperCAmelCase__ , metric_key_prefix="test" ) a = test_output.metrics a = data_args.n_test if trainer.is_world_process_zero(): a = round(metrics["test_loss"] , 4 ) handle_metrics("test" , UpperCAmelCase__ , training_args.output_dir ) all_metrics.update(UpperCAmelCase__ ) if training_args.predict_with_generate: a = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=UpperCAmelCase__ , clean_up_tokenization_spaces=UpperCAmelCase__ ) a = lmap(str.strip , UpperCAmelCase__ ) write_txt_file(UpperCAmelCase__ , os.path.join(training_args.output_dir , "test_generations.txt" ) ) if trainer.is_world_process_zero(): save_json(UpperCAmelCase__ , os.path.join(training_args.output_dir , "all_results.json" ) ) return all_metrics def UpperCAmelCase__ ( UpperCAmelCase__ :Union[str, Any] ): '''simple docstring''' main() if __name__ == "__main__": main()
711
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class _lowercase ( UpperCAmelCase__ ): _UpperCAmelCase = (UniPCMultistepScheduler,) _UpperCAmelCase = (('''num_inference_steps''', 25),) def A ( self : List[Any] , **__lowerCAmelCase : Optional[int] ) -> int: """simple docstring""" a = { "num_train_timesteps": 1000, "beta_start": 0.0_0_0_1, "beta_end": 0.0_2, "beta_schedule": "linear", "solver_order": 2, "solver_type": "bh2", } config.update(**__lowerCAmelCase ) return config def A ( self : List[Any] , __lowerCAmelCase : Optional[int]=0 , **__lowerCAmelCase : Optional[Any] ) -> int: """simple docstring""" a = dict(self.forward_default_kwargs ) a = kwargs.pop("num_inference_steps" , __lowerCAmelCase ) a = self.dummy_sample a = 0.1 * sample a = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config(**__lowerCAmelCase ) a = scheduler_class(**__lowerCAmelCase ) scheduler.set_timesteps(__lowerCAmelCase ) # copy over dummy past residuals a = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowerCAmelCase ) a = scheduler_class.from_pretrained(__lowerCAmelCase ) new_scheduler.set_timesteps(__lowerCAmelCase ) # copy over dummy past residuals a = dummy_past_residuals[: new_scheduler.config.solver_order] a , a = sample, sample for t in range(__lowerCAmelCase , time_step + scheduler.config.solver_order + 1 ): a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample a = new_scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def A ( self : List[Any] , __lowerCAmelCase : Optional[Any]=0 , **__lowerCAmelCase : List[Any] ) -> List[str]: """simple docstring""" a = dict(self.forward_default_kwargs ) a = kwargs.pop("num_inference_steps" , __lowerCAmelCase ) a = self.dummy_sample a = 0.1 * sample a = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config() a = scheduler_class(**__lowerCAmelCase ) scheduler.set_timesteps(__lowerCAmelCase ) # copy over dummy past residuals (must be after setting timesteps) a = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowerCAmelCase ) a = scheduler_class.from_pretrained(__lowerCAmelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(__lowerCAmelCase ) # copy over dummy past residual (must be after setting timesteps) a = dummy_past_residuals[: new_scheduler.config.solver_order] a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample a = new_scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def A ( self : str , __lowerCAmelCase : Any=None , **__lowerCAmelCase : List[str] ) -> Any: """simple docstring""" if scheduler is None: a = self.scheduler_classes[0] a = self.get_scheduler_config(**__lowerCAmelCase ) a = scheduler_class(**__lowerCAmelCase ) a = self.scheduler_classes[0] a = 
self.get_scheduler_config(**__lowerCAmelCase ) a = scheduler_class(**__lowerCAmelCase ) a = 10 a = self.dummy_model() a = self.dummy_sample_deter scheduler.set_timesteps(__lowerCAmelCase ) for i, t in enumerate(scheduler.timesteps ): a = model(__lowerCAmelCase , __lowerCAmelCase ) a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample return sample def A ( self : Any ) -> int: """simple docstring""" a = dict(self.forward_default_kwargs ) a = kwargs.pop("num_inference_steps" , __lowerCAmelCase ) for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config() a = scheduler_class(**__lowerCAmelCase ) a = self.dummy_sample a = 0.1 * sample if num_inference_steps is not None and hasattr(__lowerCAmelCase , "set_timesteps" ): scheduler.set_timesteps(__lowerCAmelCase ) elif num_inference_steps is not None and not hasattr(__lowerCAmelCase , "set_timesteps" ): a = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) a = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] a = dummy_past_residuals[: scheduler.config.solver_order] a = scheduler.timesteps[5] a = scheduler.timesteps[6] a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def A ( self : List[str] ) -> Dict: """simple docstring""" a = UniPCMultistepScheduler(**self.get_scheduler_config() ) a = self.full_loop(scheduler=__lowerCAmelCase ) a = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3 a = DPMSolverSinglestepScheduler.from_config(scheduler.config ) a = DEISMultistepScheduler.from_config(scheduler.config ) a = DPMSolverMultistepScheduler.from_config(scheduler.config ) a = UniPCMultistepScheduler.from_config(scheduler.config ) a = self.full_loop(scheduler=__lowerCAmelCase ) a = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3 def A ( self : List[Any] ) -> Dict: """simple docstring""" for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=__lowerCAmelCase ) def A ( self : Optional[Any] ) -> Tuple: """simple docstring""" self.check_over_configs(thresholding=__lowerCAmelCase ) for order in [1, 2, 3]: for solver_type in ["bh1", "bh2"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=__lowerCAmelCase , prediction_type=__lowerCAmelCase , sample_max_value=__lowerCAmelCase , solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , ) def A ( self : Optional[Any] ) -> Any: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__lowerCAmelCase ) def A ( self : Optional[Any] ) -> Any: """simple docstring""" for solver_type in ["bh1", "bh2"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , prediction_type=__lowerCAmelCase , ) a = self.full_loop( solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , prediction_type=__lowerCAmelCase , ) assert not torch.isnan(__lowerCAmelCase ).any(), "Samples have nan numbers" def A ( self : Optional[int] ) -> Any: """simple docstring""" self.check_over_configs(lower_order_final=__lowerCAmelCase ) 
self.check_over_configs(lower_order_final=__lowerCAmelCase ) def A ( self : Dict ) -> str: """simple docstring""" for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=__lowerCAmelCase , time_step=0 ) def A ( self : Dict ) -> int: """simple docstring""" a = self.full_loop() a = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3 def A ( self : Optional[int] ) -> int: """simple docstring""" a = self.full_loop(prediction_type="v_prediction" ) a = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_mean.item() - 0.1_0_1_4 ) < 1E-3 def A ( self : Union[str, Any] ) -> str: """simple docstring""" a = self.scheduler_classes[0] a = self.get_scheduler_config(thresholding=__lowerCAmelCase , dynamic_thresholding_ratio=0 ) a = scheduler_class(**__lowerCAmelCase ) a = 10 a = self.dummy_model() a = self.dummy_sample_deter.half() scheduler.set_timesteps(__lowerCAmelCase ) for i, t in enumerate(scheduler.timesteps ): a = model(__lowerCAmelCase , __lowerCAmelCase ) a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample assert sample.dtype == torch.floataa def A ( self : List[str] , **__lowerCAmelCase : int ) -> Dict: """simple docstring""" for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config(**__lowerCAmelCase ) a = scheduler_class(**__lowerCAmelCase ) scheduler.set_timesteps(scheduler.config.num_train_timesteps ) assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
32
0
import unittest from transformers import AutoTokenizer, NystromformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, NystromformerModel, ) from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST class _lowercase : def __init__( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any]=13 , __lowerCAmelCase : Optional[int]=7 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : List[Any]=99 , __lowerCAmelCase : str=32 , __lowerCAmelCase : List[str]=5 , __lowerCAmelCase : List[str]=4 , __lowerCAmelCase : List[str]=37 , __lowerCAmelCase : int="gelu" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : List[str]=512 , __lowerCAmelCase : Any=16 , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Dict=0.0_2 , __lowerCAmelCase : Dict=3 , __lowerCAmelCase : List[str]=4 , __lowerCAmelCase : List[Any]=None , ) -> Any: """simple docstring""" a = parent a = batch_size a = seq_length a = is_training a = use_input_mask a = use_token_type_ids a = use_labels a = vocab_size a = hidden_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = type_vocab_size a = type_sequence_label_size a = initializer_range a = num_labels a = num_choices a = scope def A ( self : int ) -> Dict: """simple docstring""" a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a = None if self.use_input_mask: a = random_attention_mask([self.batch_size, self.seq_length] ) a = None if self.use_token_type_ids: a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a = None a = None a = None if self.use_labels: a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a = ids_tensor([self.batch_size] , self.num_choices ) a = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A ( self : int ) -> Optional[int]: """simple docstring""" return NystromformerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , ) def A ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] ) -> Optional[Any]: """simple docstring""" a = 
NystromformerModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) a = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) a = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A ( self : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] ) -> List[Any]: """simple docstring""" a = NystromformerForMaskedLM(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A ( self : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] ) -> int: """simple docstring""" a = NystromformerForQuestionAnswering(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A ( self : Optional[int] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any] ) -> str: """simple docstring""" a = self.num_labels a = NystromformerForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : str ) -> List[str]: """simple docstring""" a = self.num_labels a = NystromformerForTokenClassification(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A ( self : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Dict ) -> str: """simple docstring""" a = self.num_choices a = NystromformerForMultipleChoice(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a 
= model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A ( self : Union[str, Any] ) -> int: """simple docstring""" a = self.prepare_config_and_inputs() ( ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ) = config_and_inputs a = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ): _UpperCAmelCase = ( ( NystromformerModel, NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, ) if is_torch_available() else () ) _UpperCAmelCase = ( { '''feature-extraction''': NystromformerModel, '''fill-mask''': NystromformerForMaskedLM, '''question-answering''': NystromformerForQuestionAnswering, '''text-classification''': NystromformerForSequenceClassification, '''token-classification''': NystromformerForTokenClassification, '''zero-shot''': NystromformerForSequenceClassification, } if is_torch_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = False def A ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" a = NystromformerModelTester(self ) a = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 ) def A ( self : int ) -> str: """simple docstring""" self.config_tester.run_common_tests() def A ( self : str ) -> List[str]: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def A ( self : Optional[Any] ) -> Dict: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: a = type self.model_tester.create_and_check_model(*__lowerCAmelCase ) def A ( self : Tuple ) -> Dict: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase ) def A ( self : str ) -> Optional[Any]: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase ) def A ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase ) def A ( self : Any ) -> str: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase ) def A ( self : List[str] ) -> Tuple: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase ) @slow def A ( self : Optional[Any] ) -> Dict: """simple docstring""" for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a = NystromformerModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @require_torch class _lowercase ( unittest.TestCase ): @slow def A ( self : int ) -> Optional[int]: """simple docstring""" a = NystromformerModel.from_pretrained("uw-madison/nystromformer-512" ) a = torch.tensor([[0, 1, 2, 3, 4, 5]] ) with torch.no_grad(): a = model(__lowerCAmelCase )[0] a = torch.Size((1, 6, 768) ) self.assertEqual(output.shape , __lowerCAmelCase ) a = torch.tensor( 
[[[-0.4_5_3_2, -0.0_9_3_6, 0.5_1_3_7], [-0.2_6_7_6, 0.0_6_2_8, 0.6_1_8_6], [-0.3_6_2_9, -0.1_7_2_6, 0.4_7_1_6]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1E-4 ) ) @slow def A ( self : List[str] ) -> int: """simple docstring""" a = "the [MASK] of Belgium is Brussels" a = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512" ) a = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512" ) a = tokenizer(__lowerCAmelCase , return_tensors="pt" ) with torch.no_grad(): a = model(encoding.input_ids ).logits a = token_logits[:, 2, :].argmax(-1 )[0] self.assertEqual(tokenizer.decode(__lowerCAmelCase ) , "capital" )
712
import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _lowercase : def __init__( self : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int]=13 , __lowerCAmelCase : str=32 , __lowerCAmelCase : str=3 , __lowerCAmelCase : int=4 , __lowerCAmelCase : List[str]=[10, 20, 30, 40] , __lowerCAmelCase : Any=[2, 2, 3, 2] , __lowerCAmelCase : Any=True , __lowerCAmelCase : int=True , __lowerCAmelCase : str=37 , __lowerCAmelCase : List[Any]="gelu" , __lowerCAmelCase : int=10 , __lowerCAmelCase : str=0.0_2 , __lowerCAmelCase : int=["stage2", "stage3", "stage4"] , __lowerCAmelCase : List[str]=[2, 3, 4] , __lowerCAmelCase : str=None , ) -> Optional[Any]: """simple docstring""" a = parent a = batch_size a = image_size a = num_channels a = num_stages a = hidden_sizes a = depths a = is_training a = use_labels a = intermediate_size a = hidden_act a = num_labels a = initializer_range a = out_features a = out_indices a = scope def A ( self : Optional[Any] ) -> int: """simple docstring""" a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a = None if self.use_labels: a = ids_tensor([self.batch_size] , self.num_labels ) a = self.get_config() return config, pixel_values, labels def A ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" return ConvNextVaConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def A ( self : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict ) -> Optional[int]: """simple docstring""" a = ConvNextVaModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def A ( self : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] ) -> Dict: """simple docstring""" a = ConvNextVaForImageClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] ) -> int: """simple docstring""" a = 
ConvNextVaBackbone(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        a = model(__lowerCAmelCase )

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )

        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )

        # verify backbone works with out_features=None
        a = None
        a = ConvNextVaBackbone(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        a = model(__lowerCAmelCase )

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )

        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )

    def A ( self : Union[str, Any] ) -> Dict:
        """simple docstring"""
        a = self.prepare_config_and_inputs()
        a , a , a = config_and_inputs
        a = {"pixel_values": pixel_values}
        return config, inputs_dict

    def A ( self : Dict ) -> Optional[int]:
        """simple docstring"""
        a = self.prepare_config_and_inputs()
        a , a , a = config_and_inputs
        a = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict


@require_torch
class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
    _UpperCAmelCase = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    _UpperCAmelCase = (
        {'''feature-extraction''': ConvNextVaModel, '''image-classification''': ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )

    _UpperCAmelCase = False
    _UpperCAmelCase = False
    _UpperCAmelCase = False
    _UpperCAmelCase = False
    _UpperCAmelCase = False

    def A ( self : List[str] ) -> List[Any]:
        """simple docstring"""
        a = ConvNextVaModelTester(self )
        a = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=37 )

    def A ( self : Tuple ) -> Dict:
        """simple docstring"""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def A ( self : List[Any] ) -> List[Any]:
        """simple docstring"""
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
    def A ( self : List[Any] ) -> List[Any]:
        """simple docstring"""
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
    def A ( self : int ) -> List[Any]:
        """simple docstring"""
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
    def A ( self : Optional[int] ) -> Dict:
        """simple docstring"""
        pass

    def A ( self : List[str] ) -> List[str]:
        """simple docstring"""
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            a , a = self.model_tester.prepare_config_and_inputs_with_labels()
            a = True

            if model_class.__name__ in [
                *get_values(__lowerCAmelCase ),
                *get_values(__lowerCAmelCase ),
            ]:
                continue

            a = model_class(__lowerCAmelCase )
            model.to(__lowerCAmelCase )
            model.train()
            a = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
            a = model(**__lowerCAmelCase ).loss
            loss.backward()

    def A ( self : Optional[int] ) -> List[str]:
        """simple docstring"""
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            a , a = self.model_tester.prepare_config_and_inputs_with_labels()
            a = False
            a = True

            if (
                model_class.__name__ in [*get_values(__lowerCAmelCase ), *get_values(__lowerCAmelCase )]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            a = model_class(__lowerCAmelCase )
            model.to(__lowerCAmelCase )
            model.gradient_checkpointing_enable()
            model.train()
            a = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
            a = model(**__lowerCAmelCase ).loss
            loss.backward()

    def A ( self : List[Any] ) -> Any:
        """simple docstring"""
        a , a = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            a = model_class(__lowerCAmelCase )
            a = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            a = [*signature.parameters.keys()]

            a = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , __lowerCAmelCase )

    def A ( self : Dict ) -> Dict:
        """simple docstring"""
        a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowerCAmelCase )

    def A ( self : Tuple ) -> List[Any]:
        """simple docstring"""

        def check_hidden_states_output(__lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple ):
            a = model_class(__lowerCAmelCase )
            model.to(__lowerCAmelCase )
            model.eval()

            with torch.no_grad():
                a = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )

            a = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            a = self.model_tester.num_stages
            self.assertEqual(len(__lowerCAmelCase ) , expected_num_stages + 1 )

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) ,
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,
            )

        a , a = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            a = True
            check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            a = True

            check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )

    def A ( self : Optional[Any] ) -> Optional[Any]:
        """simple docstring"""
        a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )

    @slow
    def A ( self : Tuple ) -> List[str]:
        """simple docstring"""
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a = ConvNextVaModel.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )


def UpperCAmelCase__ ( ):
    '''simple docstring'''
    a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image


@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
    @cached_property
    def A ( self : Optional[int] ) -> str:
        """simple docstring"""
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None

    @slow
    def A ( self : List[str] ) -> Union[str, Any]:
        """simple docstring"""
        a = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(__lowerCAmelCase )

        a = self.default_image_processor
        a = prepare_img()
        a = preprocessor(images=__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase )

        # forward pass
        with torch.no_grad():
            a = model(**__lowerCAmelCase )

        # verify the logits
        a = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , __lowerCAmelCase )

        a = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(__lowerCAmelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1E-4 ) )
32
0
'''simple docstring'''


def UpperCAmelCase__ ( UpperCAmelCase__ :Optional[int] , UpperCAmelCase__ :int ):
    '''simple docstring'''
    a = [0 for i in range(r + 1 )]
    # nc0 = 1
    a = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        a = min(UpperCAmelCase__ , UpperCAmelCase__ )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
713
import copy import os import cva import numpy as np from matplotlib import pyplot as plt class _lowercase : def __init__( self : List[str] ) -> List[str]: """simple docstring""" a = "" a = "" a = [] a = 0 a = 256 a = 0 a = 0 a = 0 a = 0 def A ( self : Optional[Any] , __lowerCAmelCase : Any ) -> int: """simple docstring""" a = cva.imread(__lowerCAmelCase , 0 ) a = copy.deepcopy(self.img ) a , a , a = plt.hist(self.img.ravel() , 256 , [0, 256] , label="x" ) a = np.sum(__lowerCAmelCase ) for i in range(len(__lowerCAmelCase ) ): a = x[i] / self.k self.sk += prk a = (self.L - 1) * self.sk if self.rem != 0: a = int(last % last ) a = int(last + 1 if self.rem >= 0.5 else last ) self.last_list.append(__lowerCAmelCase ) a = int(np.ma.count(self.img ) / self.img[1].size ) a = self.img[1].size for i in range(self.number_of_cols ): for j in range(self.number_of_rows ): a = self.img[j][i] if num != self.last_list[num]: a = self.last_list[num] cva.imwrite("output_data/output.jpg" , self.img ) def A ( self : Any ) -> int: """simple docstring""" plt.hist(self.img.ravel() , 256 , [0, 256] ) def A ( self : Any ) -> int: """simple docstring""" cva.imshow("Output-Image" , self.img ) cva.imshow("Input-Image" , self.original_image ) cva.waitKey(5000 ) cva.destroyAllWindows() if __name__ == "__main__": A_ : List[Any] = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''') A_ : int = ConstantStretch() stretcher.stretch(file_path) stretcher.plot_histogram() stretcher.show_image()
32
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


A_ : Dict = {'''configuration_wavlm''': ['''WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WavLMConfig''']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A_ : Union[str, Any] = [
        '''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''WavLMForAudioFrameClassification''',
        '''WavLMForCTC''',
        '''WavLMForSequenceClassification''',
        '''WavLMForXVector''',
        '''WavLMModel''',
        '''WavLMPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )

else:
    import sys

    A_ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
714
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class _lowercase ( UpperCAmelCase__ ):
    _UpperCAmelCase = 42
    _UpperCAmelCase = 42

    def __init__( self : Optional[Any] , __lowerCAmelCase : UNetaDModel , __lowerCAmelCase : ScoreSdeVeScheduler ) -> str:
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase )

    @torch.no_grad()
    def __call__( self : int , __lowerCAmelCase : int = 1 , __lowerCAmelCase : int = 2000 , __lowerCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowerCAmelCase : Optional[str] = "pil" , __lowerCAmelCase : bool = True , **__lowerCAmelCase : Any , ) -> Union[ImagePipelineOutput, Tuple]:
        """simple docstring"""
        a = self.unet.config.sample_size
        a = (batch_size, 3, img_size, img_size)

        a = self.unet

        a = randn_tensor(__lowerCAmelCase , generator=__lowerCAmelCase ) * self.scheduler.init_noise_sigma
        a = sample.to(self.device )

        self.scheduler.set_timesteps(__lowerCAmelCase )
        self.scheduler.set_sigmas(__lowerCAmelCase )

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            a = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )

            # correction step
            for _ in range(self.scheduler.config.correct_steps ):
                a = self.unet(__lowerCAmelCase , __lowerCAmelCase ).sample
                a = self.scheduler.step_correct(__lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase ).prev_sample

            # prediction step
            a = model(__lowerCAmelCase , __lowerCAmelCase ).sample
            a = self.scheduler.step_pred(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase )

            a , a = output.prev_sample, output.prev_sample_mean

        a = sample_mean.clamp(0 , 1 )
        a = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            a = self.numpy_to_pil(__lowerCAmelCase )

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=__lowerCAmelCase )
32
0
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow


@require_torch
class _lowercase ( unittest.TestCase ):
    def A ( self : str ) -> Optional[int]:
        """simple docstring"""
        a = "ylacombe/bark-small"
        a = tempfile.mkdtemp()
        a = "en_speaker_1"
        a = "This is a test string"
        a = "speaker_embeddings_path.json"
        a = "speaker_embeddings"

    def A ( self : Any , **__lowerCAmelCase : Dict ) -> Union[str, Any]:
        """simple docstring"""
        return AutoTokenizer.from_pretrained(self.checkpoint , **__lowerCAmelCase )

    def A ( self : int ) -> Dict:
        """simple docstring"""
        shutil.rmtree(self.tmpdirname )

    def A ( self : Any ) -> List[Any]:
        """simple docstring"""
        a = self.get_tokenizer()
        a = BarkProcessor(tokenizer=__lowerCAmelCase )

        processor.save_pretrained(self.tmpdirname )
        a = BarkProcessor.from_pretrained(self.tmpdirname )

        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )

    @slow
    def A ( self : List[str] ) -> Dict:
        """simple docstring"""
        a = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint ,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,
        )
        processor.save_pretrained(
            self.tmpdirname ,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,
            speaker_embeddings_directory=self.speaker_embeddings_directory ,
        )

        a = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        a = BarkProcessor.from_pretrained(
            self.tmpdirname ,
            self.speaker_embeddings_dict_path ,
            bos_token="(BOS)" ,
            eos_token="(EOS)" ,
        )

        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )

    def A ( self : Optional[Any] ) -> Dict:
        """simple docstring"""
        a = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint ,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,
        )

        a = 35
        a = 2
        a = 8

        a = {
            "semantic_prompt": np.ones(__lowerCAmelCase ),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len) ),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len) ),
        }

        # test providing already loaded voice_preset
        a = processor(text=self.input_string , voice_preset=__lowerCAmelCase )

        a = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__lowerCAmelCase , np.array([] ) ).tolist() )

        # test loading voice preset from npz file
        a = os.path.join(self.tmpdirname , "file.npz" )
        np.savez(__lowerCAmelCase , **__lowerCAmelCase )
        a = processor(text=self.input_string , voice_preset=__lowerCAmelCase )
        a = inputs["history_prompt"]

        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__lowerCAmelCase , np.array([] ) ).tolist() )

        # test loading voice preset from the hub
        a = processor(text=self.input_string , voice_preset=self.voice_preset )

    def A ( self : Optional[Any] ) -> List[Any]:
        """simple docstring"""
        a = self.get_tokenizer()
        a = BarkProcessor(tokenizer=__lowerCAmelCase )

        a = processor(text=self.input_string )

        a = tokenizer(
            self.input_string ,
            padding="max_length" ,
            max_length=256 ,
            add_special_tokens=__lowerCAmelCase ,
            return_attention_mask=__lowerCAmelCase ,
            return_token_type_ids=__lowerCAmelCase ,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
715
A_ : Any = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
A_ : Tuple = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
A_ : Optional[int] = {
    0: '''Sunday''',
    1: '''Monday''',
    2: '''Tuesday''',
    3: '''Wednesday''',
    4: '''Thursday''',
    5: '''Friday''',
    6: '''Saturday''',
}


def UpperCAmelCase__ ( UpperCAmelCase__ :int , UpperCAmelCase__ :int , UpperCAmelCase__ :int ):
    '''simple docstring'''
    assert len(str(UpperCAmelCase__ ) ) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"

    # Doomsday algorithm:
    a = year // 1_00
    a = (5 * (century % 4) + 2) % 7
    a = year % 1_00
    a = centurian % 12
    a = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    a = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 4_00) == 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    a = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
32
0
from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES A_ : int = logging.get_logger(__name__) A_ : List[Any] = OrderedDict( [ # Base model mapping ('''albert''', '''FlaxAlbertModel'''), ('''bart''', '''FlaxBartModel'''), ('''beit''', '''FlaxBeitModel'''), ('''bert''', '''FlaxBertModel'''), ('''big_bird''', '''FlaxBigBirdModel'''), ('''blenderbot''', '''FlaxBlenderbotModel'''), ('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''), ('''clip''', '''FlaxCLIPModel'''), ('''distilbert''', '''FlaxDistilBertModel'''), ('''electra''', '''FlaxElectraModel'''), ('''gpt-sw3''', '''FlaxGPT2Model'''), ('''gpt2''', '''FlaxGPT2Model'''), ('''gpt_neo''', '''FlaxGPTNeoModel'''), ('''gptj''', '''FlaxGPTJModel'''), ('''longt5''', '''FlaxLongT5Model'''), ('''marian''', '''FlaxMarianModel'''), ('''mbart''', '''FlaxMBartModel'''), ('''mt5''', '''FlaxMT5Model'''), ('''opt''', '''FlaxOPTModel'''), ('''pegasus''', '''FlaxPegasusModel'''), ('''regnet''', '''FlaxRegNetModel'''), ('''resnet''', '''FlaxResNetModel'''), ('''roberta''', '''FlaxRobertaModel'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''), ('''roformer''', '''FlaxRoFormerModel'''), ('''t5''', '''FlaxT5Model'''), ('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''), ('''vit''', '''FlaxViTModel'''), ('''wav2vec2''', '''FlaxWav2Vec2Model'''), ('''whisper''', '''FlaxWhisperModel'''), ('''xglm''', '''FlaxXGLMModel'''), ('''xlm-roberta''', '''FlaxXLMRobertaModel'''), ] ) A_ : str = OrderedDict( [ # Model for pre-training mapping ('''albert''', '''FlaxAlbertForPreTraining'''), ('''bart''', '''FlaxBartForConditionalGeneration'''), ('''bert''', '''FlaxBertForPreTraining'''), ('''big_bird''', '''FlaxBigBirdForPreTraining'''), ('''electra''', '''FlaxElectraForPreTraining'''), ('''longt5''', '''FlaxLongT5ForConditionalGeneration'''), ('''mbart''', '''FlaxMBartForConditionalGeneration'''), ('''mt5''', '''FlaxMT5ForConditionalGeneration'''), ('''roberta''', '''FlaxRobertaForMaskedLM'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''), ('''roformer''', '''FlaxRoFormerForMaskedLM'''), ('''t5''', '''FlaxT5ForConditionalGeneration'''), ('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''), ('''whisper''', '''FlaxWhisperForConditionalGeneration'''), ('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''), ] ) A_ : List[Any] = OrderedDict( [ # Model for Masked LM mapping ('''albert''', '''FlaxAlbertForMaskedLM'''), ('''bart''', '''FlaxBartForConditionalGeneration'''), ('''bert''', '''FlaxBertForMaskedLM'''), ('''big_bird''', '''FlaxBigBirdForMaskedLM'''), ('''distilbert''', '''FlaxDistilBertForMaskedLM'''), ('''electra''', '''FlaxElectraForMaskedLM'''), ('''mbart''', '''FlaxMBartForConditionalGeneration'''), ('''roberta''', '''FlaxRobertaForMaskedLM'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''), ('''roformer''', '''FlaxRoFormerForMaskedLM'''), ('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''), ] ) A_ : Dict = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ('''bart''', '''FlaxBartForConditionalGeneration'''), ('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''), ('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''), ('''encoder-decoder''', '''FlaxEncoderDecoderModel'''), ('''longt5''', '''FlaxLongT5ForConditionalGeneration'''), ('''marian''', '''FlaxMarianMTModel'''), ('''mbart''', 
'''FlaxMBartForConditionalGeneration'''), ('''mt5''', '''FlaxMT5ForConditionalGeneration'''), ('''pegasus''', '''FlaxPegasusForConditionalGeneration'''), ('''t5''', '''FlaxT5ForConditionalGeneration'''), ] ) A_ : Optional[Any] = OrderedDict( [ # Model for Image-classsification ('''beit''', '''FlaxBeitForImageClassification'''), ('''regnet''', '''FlaxRegNetForImageClassification'''), ('''resnet''', '''FlaxResNetForImageClassification'''), ('''vit''', '''FlaxViTForImageClassification'''), ] ) A_ : List[str] = OrderedDict( [ ('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''), ] ) A_ : Optional[int] = OrderedDict( [ # Model for Causal LM mapping ('''bart''', '''FlaxBartForCausalLM'''), ('''bert''', '''FlaxBertForCausalLM'''), ('''big_bird''', '''FlaxBigBirdForCausalLM'''), ('''electra''', '''FlaxElectraForCausalLM'''), ('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''), ('''gpt2''', '''FlaxGPT2LMHeadModel'''), ('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''), ('''gptj''', '''FlaxGPTJForCausalLM'''), ('''opt''', '''FlaxOPTForCausalLM'''), ('''roberta''', '''FlaxRobertaForCausalLM'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''), ('''xglm''', '''FlaxXGLMForCausalLM'''), ('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''), ] ) A_ : List[str] = OrderedDict( [ # Model for Sequence Classification mapping ('''albert''', '''FlaxAlbertForSequenceClassification'''), ('''bart''', '''FlaxBartForSequenceClassification'''), ('''bert''', '''FlaxBertForSequenceClassification'''), ('''big_bird''', '''FlaxBigBirdForSequenceClassification'''), ('''distilbert''', '''FlaxDistilBertForSequenceClassification'''), ('''electra''', '''FlaxElectraForSequenceClassification'''), ('''mbart''', '''FlaxMBartForSequenceClassification'''), ('''roberta''', '''FlaxRobertaForSequenceClassification'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''), ('''roformer''', '''FlaxRoFormerForSequenceClassification'''), ('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''), ] ) A_ : int = OrderedDict( [ # Model for Question Answering mapping ('''albert''', '''FlaxAlbertForQuestionAnswering'''), ('''bart''', '''FlaxBartForQuestionAnswering'''), ('''bert''', '''FlaxBertForQuestionAnswering'''), ('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''), ('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''), ('''electra''', '''FlaxElectraForQuestionAnswering'''), ('''mbart''', '''FlaxMBartForQuestionAnswering'''), ('''roberta''', '''FlaxRobertaForQuestionAnswering'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''), ('''roformer''', '''FlaxRoFormerForQuestionAnswering'''), ('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''), ] ) A_ : str = OrderedDict( [ # Model for Token Classification mapping ('''albert''', '''FlaxAlbertForTokenClassification'''), ('''bert''', '''FlaxBertForTokenClassification'''), ('''big_bird''', '''FlaxBigBirdForTokenClassification'''), ('''distilbert''', '''FlaxDistilBertForTokenClassification'''), ('''electra''', '''FlaxElectraForTokenClassification'''), ('''roberta''', '''FlaxRobertaForTokenClassification'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''), ('''roformer''', '''FlaxRoFormerForTokenClassification'''), ('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''), ] ) A_ : int = OrderedDict( [ # Model for Multiple Choice mapping ('''albert''', '''FlaxAlbertForMultipleChoice'''), ('''bert''', '''FlaxBertForMultipleChoice'''), 
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''), ('''distilbert''', '''FlaxDistilBertForMultipleChoice'''), ('''electra''', '''FlaxElectraForMultipleChoice'''), ('''roberta''', '''FlaxRobertaForMultipleChoice'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''), ('''roformer''', '''FlaxRoFormerForMultipleChoice'''), ('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''), ] ) A_ : Dict = OrderedDict( [ ('''bert''', '''FlaxBertForNextSentencePrediction'''), ] ) A_ : Optional[Any] = OrderedDict( [ ('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''), ('''whisper''', '''FlaxWhisperForConditionalGeneration'''), ] ) A_ : Any = OrderedDict( [ ('''whisper''', '''FlaxWhisperForAudioClassification'''), ] ) A_ : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) A_ : Tuple = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) A_ : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) A_ : int = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) A_ : Optional[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) A_ : Union[str, Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) A_ : Union[str, Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) A_ : int = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) A_ : Dict = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) A_ : int = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) A_ : int = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) A_ : str = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) A_ : Optional[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) A_ : int = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class _lowercase ( _BaseAutoModelClass ): _UpperCAmelCase = FLAX_MODEL_MAPPING A_ : List[str] = auto_class_update(FlaxAutoModel) class _lowercase ( _BaseAutoModelClass ): _UpperCAmelCase = FLAX_MODEL_FOR_PRETRAINING_MAPPING A_ : List[Any] = auto_class_update(FlaxAutoModelForPreTraining, head_doc='''pretraining''') class _lowercase ( _BaseAutoModelClass ): _UpperCAmelCase = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING A_ : Optional[int] = auto_class_update(FlaxAutoModelForCausalLM, head_doc='''causal language modeling''') class _lowercase ( _BaseAutoModelClass ): _UpperCAmelCase = FLAX_MODEL_FOR_MASKED_LM_MAPPING A_ : int = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='''masked language modeling''') class _lowercase ( _BaseAutoModelClass ): _UpperCAmelCase = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING A_ : Tuple = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc='''sequence-to-sequence language modeling''', checkpoint_for_example='''t5-base''' ) class _lowercase ( _BaseAutoModelClass ): _UpperCAmelCase = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING A_ : List[str] = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc='''sequence classification''' ) class _lowercase ( _BaseAutoModelClass ): _UpperCAmelCase = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING A_ : int = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='''question 
answering''') class _lowercase ( _BaseAutoModelClass ): _UpperCAmelCase = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING A_ : Any = auto_class_update( FlaxAutoModelForTokenClassification, head_doc='''token classification''' ) class _lowercase ( _BaseAutoModelClass ): _UpperCAmelCase = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING A_ : Tuple = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='''multiple choice''') class _lowercase ( _BaseAutoModelClass ): _UpperCAmelCase = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING A_ : Union[str, Any] = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc='''next sentence prediction''' ) class _lowercase ( _BaseAutoModelClass ): _UpperCAmelCase = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING A_ : Tuple = auto_class_update( FlaxAutoModelForImageClassification, head_doc='''image classification''' ) class _lowercase ( _BaseAutoModelClass ): _UpperCAmelCase = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING A_ : str = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='''vision-to-text modeling''') class _lowercase ( _BaseAutoModelClass ): _UpperCAmelCase = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING A_ : Dict = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc='''sequence-to-sequence speech-to-text modeling''' )
716
import logging import os import sys from dataclasses import dataclass, field from importlib import import_module from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch import nn from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process A_ : int = logging.getLogger(__name__) @dataclass class _lowercase : _UpperCAmelCase = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) _UpperCAmelCase = field( default=UpperCAmelCase__, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) _UpperCAmelCase = field( default='''NER''', metadata={'''help''': '''Task type to fine tune in training (e.g. NER, POS, etc)'''} ) _UpperCAmelCase = field( default=UpperCAmelCase__, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) _UpperCAmelCase = field(default=UpperCAmelCase__, metadata={'''help''': '''Set this flag to use fast tokenization.'''} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. _UpperCAmelCase = field( default=UpperCAmelCase__, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, ) @dataclass class _lowercase : _UpperCAmelCase = field( metadata={'''help''': '''The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'''} ) _UpperCAmelCase = field( default=UpperCAmelCase__, metadata={'''help''': '''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'''}, ) _UpperCAmelCase = field( default=128, metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) }, ) _UpperCAmelCase = field( default=UpperCAmelCase__, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) def UpperCAmelCase__ ( ): '''simple docstring''' a = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. a , a , a = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: a , a , a = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use""" " --overwrite_output_dir to overcome." ) a = import_module("tasks" ) try: a = getattr(UpperCAmelCase__ , model_args.task_type ) a = token_classification_task_clazz() except AttributeError: raise ValueError( F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. 
""" F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("Training/evaluation parameters %s" , UpperCAmelCase__ ) # Set seed set_seed(training_args.seed ) # Prepare CONLL-2003 task a = token_classification_task.get_labels(data_args.labels ) a = dict(enumerate(UpperCAmelCase__ ) ) a = len(UpperCAmelCase__ ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. a = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCAmelCase__ , idalabel=UpperCAmelCase__ , labelaid={label: i for i, label in enumerate(UpperCAmelCase__ )} , cache_dir=model_args.cache_dir , ) a = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , ) a = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=UpperCAmelCase__ , cache_dir=model_args.cache_dir , ) # Get datasets a = ( TokenClassificationDataset( token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) a = ( TokenClassificationDataset( token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def align_predictions(UpperCAmelCase__ :np.ndarray , UpperCAmelCase__ :np.ndarray ) -> Tuple[List[int], List[int]]: a = np.argmax(UpperCAmelCase__ , axis=2 ) a , a = preds.shape a = [[] for _ in range(UpperCAmelCase__ )] a = [[] for _ in range(UpperCAmelCase__ )] for i in range(UpperCAmelCase__ ): for j in range(UpperCAmelCase__ ): if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index: out_label_list[i].append(label_map[label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) return preds_list, out_label_list def compute_metrics(UpperCAmelCase__ :EvalPrediction ) -> Dict: a , a = align_predictions(p.predictions , p.label_ids ) return { "accuracy_score": accuracy_score(UpperCAmelCase__ , UpperCAmelCase__ ), "precision": precision_score(UpperCAmelCase__ , UpperCAmelCase__ ), "recall": recall_score(UpperCAmelCase__ , UpperCAmelCase__ ), "f1": fa_score(UpperCAmelCase__ , UpperCAmelCase__ ), } # Data collator a 
= DataCollatorWithPadding(UpperCAmelCase__ , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer a = Trainer( model=UpperCAmelCase__ , args=UpperCAmelCase__ , train_dataset=UpperCAmelCase__ , eval_dataset=UpperCAmelCase__ , compute_metrics=UpperCAmelCase__ , data_collator=UpperCAmelCase__ , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_process_zero(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation a = {} if training_args.do_eval: logger.info("*** Evaluate ***" ) a = trainer.evaluate() a = os.path.join(training_args.output_dir , "eval_results.txt" ) if trainer.is_world_process_zero(): with open(UpperCAmelCase__ , "w" ) as writer: logger.info("***** Eval results *****" ) for key, value in result.items(): logger.info(" %s = %s" , UpperCAmelCase__ , UpperCAmelCase__ ) writer.write("%s = %s\n" % (key, value) ) results.update(UpperCAmelCase__ ) # Predict if training_args.do_predict: a = TokenClassificationDataset( token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , ) a , a , a = trainer.predict(UpperCAmelCase__ ) a , a = align_predictions(UpperCAmelCase__ , UpperCAmelCase__ ) a = os.path.join(training_args.output_dir , "test_results.txt" ) if trainer.is_world_process_zero(): with open(UpperCAmelCase__ , "w" ) as writer: for key, value in metrics.items(): logger.info(" %s = %s" , UpperCAmelCase__ , UpperCAmelCase__ ) writer.write("%s = %s\n" % (key, value) ) # Save predictions a = os.path.join(training_args.output_dir , "test_predictions.txt" ) if trainer.is_world_process_zero(): with open(UpperCAmelCase__ , "w" ) as writer: with open(os.path.join(data_args.data_dir , "test.txt" ) , "r" ) as f: token_classification_task.write_predictions_to_file(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) return results def UpperCAmelCase__ ( UpperCAmelCase__ :Tuple ): '''simple docstring''' main() if __name__ == "__main__": main()
32
0
import argparse
import shlex

import runhouse as rh


if __name__ == "__main__":
    # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
    # setup instructions, if using on-demand hardware
    # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
    # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
    # Throw an error if user passes both BYO and on-demand cluster args
    # Otherwise, use default values
    A_ : Dict = argparse.ArgumentParser()
    parser.add_argument('''--user''', type=str, default='''ubuntu''')
    parser.add_argument('''--host''', type=str, default='''localhost''')
    parser.add_argument('''--key_path''', type=str, default=None)
    parser.add_argument('''--instance''', type=str, default='''V100:1''')
    parser.add_argument('''--provider''', type=str, default='''cheapest''')
    parser.add_argument('''--use_spot''', type=bool, default=False)
    parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''')
    A_ : List[Any] = parser.parse_known_args()

    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError('''Cannot specify both BYO and on-demand cluster args''')
        A_ : Optional[Any] = rh.cluster(
            name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path}
        )
    else:
        A_ : List[Any] = rh.cluster(
            name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    A_ : Tuple = args.example.rsplit('''/''', 1)[0]

    # Set up remote environment
    cluster.install_packages(['''pip:./'''])  # Installs transformers from local source
    # Note transformers is copied into the home directory on the remote machine, so we can install from there
    cluster.run([F"""pip install -r transformers/examples/{example_dir}/requirements.txt"""])
    cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'''])

    # Run example. You can bypass the CLI wrapper and paste your own code here.
    cluster.run([F"""python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"""])

    # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
    # from my_script... import train
    # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
    # launch_train_gpu = rh.function(fn=train,
    #                                system=gpu,
    #                                reqs=reqs,
    #                                name='train_bert_glue')
    #
    # We can pass in arguments just like we would to a function:
    # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
    #                  stream_logs=True)
717
from ...configuration_utils import PretrainedConfig
from ...utils import logging


A_ : str = logging.get_logger(__name__)

A_ : List[Any] = {
    '''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
    '''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
    '''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
    '''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}


class _lowercase ( UpperCAmelCase__ ):
    _UpperCAmelCase = '''rwkv'''
    _UpperCAmelCase = {'''max_position_embeddings''': '''context_length'''}

    def __init__( self : List[str] , __lowerCAmelCase : Union[str, Any]=5_0277 , __lowerCAmelCase : str=1024 , __lowerCAmelCase : Union[str, Any]=4096 , __lowerCAmelCase : Optional[int]=32 , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : List[Any]=1E-5 , __lowerCAmelCase : Union[str, Any]=0 , __lowerCAmelCase : Optional[Any]=0 , __lowerCAmelCase : Dict=6 , __lowerCAmelCase : int=False , __lowerCAmelCase : Tuple=True , **__lowerCAmelCase : List[str] , ) -> List[Any]:
        """simple docstring"""
        a = vocab_size
        a = context_length
        a = hidden_size
        a = num_hidden_layers
        a = attention_hidden_size if attention_hidden_size is not None else hidden_size
        a = intermediate_size if intermediate_size is not None else 4 * hidden_size
        a = layer_norm_epsilon
        a = rescale_every
        a = use_cache

        a = bos_token_id
        a = eos_token_id

        super().__init__(
            tie_word_embeddings=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase
        )
32
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


A_ : str = logging.get_logger(__name__)

A_ : List[Any] = {
    '''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
    '''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
    '''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
    '''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}


class _lowercase ( UpperCAmelCase__ ):
    _UpperCAmelCase = '''rwkv'''
    _UpperCAmelCase = {'''max_position_embeddings''': '''context_length'''}

    def __init__( self : List[str] , __lowerCAmelCase : Union[str, Any]=5_0277 , __lowerCAmelCase : str=1024 , __lowerCAmelCase : Union[str, Any]=4096 , __lowerCAmelCase : Optional[int]=32 , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : List[Any]=1E-5 , __lowerCAmelCase : Union[str, Any]=0 , __lowerCAmelCase : Optional[Any]=0 , __lowerCAmelCase : Dict=6 , __lowerCAmelCase : int=False , __lowerCAmelCase : Tuple=True , **__lowerCAmelCase : List[str] , ) -> List[Any]:
        """simple docstring"""
        a = vocab_size
        a = context_length
        a = hidden_size
        a = num_hidden_layers
        a = attention_hidden_size if attention_hidden_size is not None else hidden_size
        a = intermediate_size if intermediate_size is not None else 4 * hidden_size
        a = layer_norm_epsilon
        a = rescale_every
        a = use_cache

        a = bos_token_id
        a = eos_token_id

        super().__init__(
            tie_word_embeddings=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase
        )
718
from math import ceil
from typing import List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging


A_ : List[str] = logging.get_logger(__name__)


class _lowercase ( UpperCAmelCase__ ):
    _UpperCAmelCase = ['''audio_values''', '''audio_mask''']

    def __init__( self : List[Any] , __lowerCAmelCase : Dict=2048 , __lowerCAmelCase : List[Any]=1 , __lowerCAmelCase : Dict=[16, 16] , __lowerCAmelCase : str=128 , __lowerCAmelCase : Optional[int]=4_4100 , __lowerCAmelCase : int=86 , __lowerCAmelCase : Optional[Any]=2048 , __lowerCAmelCase : str=0.0 , **__lowerCAmelCase : Optional[int] , ) -> Union[str, Any]:
        """simple docstring"""
        super().__init__(
            feature_size=__lowerCAmelCase ,
            sampling_rate=__lowerCAmelCase ,
            padding_value=__lowerCAmelCase ,
            **__lowerCAmelCase ,
        )

        a = spectrogram_length
        a = num_channels
        a = patch_size
        a = feature_size // self.patch_size[1]
        a = n_fft
        a = sampling_rate // hop_length_to_sampling_rate
        a = sampling_rate
        a = padding_value
        a = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 ,
            num_mel_filters=__lowerCAmelCase ,
            min_frequency=0.0 ,
            max_frequency=2_2_0_5_0.0 ,
            sampling_rate=__lowerCAmelCase ,
            norm="slaney" ,
            mel_scale="slaney" ,
        ).T

    def A ( self : List[str] , __lowerCAmelCase : np.array ) -> np.ndarray:
        """simple docstring"""
        a = spectrogram(
            __lowerCAmelCase ,
            window_function(self.n_fft , "hann" ) ,
            frame_length=self.n_fft ,
            hop_length=self.hop_length ,
            power=2.0 ,
            mel_filters=self.mel_filters.T ,
            log_mel="dB" ,
            db_range=8_0.0 ,
        )
        a = log_spec[:, :-1]
        a = log_spec - 2_0.0
        a = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0
        return log_spec

    def __call__( self : Union[str, Any] , __lowerCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , __lowerCAmelCase : Optional[bool] = True , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , **__lowerCAmelCase : Optional[int] , ) -> BatchFeature:
        """simple docstring"""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
                    f""" with {self.sampling_rate} and not {sampling_rate}."""
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        a = isinstance(__lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
        a = is_batched_numpy or (
            isinstance(__lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            a = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
        elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray ):
            a = np.asarray(__lowerCAmelCase , dtype=np.floataa )
        elif isinstance(__lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            a = raw_speech.astype(np.floataa )

        # always return batch
        if not is_batched:
            a = [np.asarray([raw_speech] ).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        a = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , __lowerCAmelCase ):
            a = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for feature in audio_features]

        # Create audio attention mask
        a = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            a = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            a = np.array(__lowerCAmelCase ).astype(np.floataa )

        # convert into correct format for padding
        a = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        a = np.ones([len(__lowerCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
        a = padded_audio_features * self.padding_value
        for i in range(len(__lowerCAmelCase ) ):
            a = audio_features[i]
            a = feature

        # return as BatchFeature
        if return_attention_mask:
            a = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            a = {"audio_values": padded_audio_features}

        a = BatchFeature(data=__lowerCAmelCase , tensor_type=__lowerCAmelCase )

        return encoded_inputs
32
0
import os
import sys
from contextlib import contextmanager


# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa


class _lowercase ( ctypes.Structure ):
    # _fields is a specific attr expected by ctypes
    _UpperCAmelCase = [('''size''', ctypes.c_int), ('''visible''', ctypes.c_byte)]


def UpperCAmelCase__ ( ):
    '''simple docstring'''
    if os.name == "nt":
        a = CursorInfo()
        a = ctypes.windll.kernelaa.GetStdHandle(-11 )
        ctypes.windll.kernelaa.GetConsoleCursorInfo(UpperCAmelCase__ , ctypes.byref(UpperCAmelCase__ ) )
        a = False
        ctypes.windll.kernelaa.SetConsoleCursorInfo(UpperCAmelCase__ , ctypes.byref(UpperCAmelCase__ ) )
    elif os.name == "posix":
        sys.stdout.write("\033[?25l" )
        sys.stdout.flush()


def UpperCAmelCase__ ( ):
    '''simple docstring'''
    if os.name == "nt":
        a = CursorInfo()
        a = ctypes.windll.kernelaa.GetStdHandle(-11 )
        ctypes.windll.kernelaa.GetConsoleCursorInfo(UpperCAmelCase__ , ctypes.byref(UpperCAmelCase__ ) )
        a = True
        ctypes.windll.kernelaa.SetConsoleCursorInfo(UpperCAmelCase__ , ctypes.byref(UpperCAmelCase__ ) )
    elif os.name == "posix":
        sys.stdout.write("\033[?25h" )
        sys.stdout.flush()


@contextmanager
def UpperCAmelCase__ ( ):
    '''simple docstring'''
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
719
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class _lowercase : def __init__( self : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : int=10 , __lowerCAmelCase : Any=3 , __lowerCAmelCase : Optional[int]=32 * 4 , __lowerCAmelCase : Dict=32 * 6 , __lowerCAmelCase : str=4 , __lowerCAmelCase : Dict=32 , ) -> Any: """simple docstring""" a = parent a = batch_size a = is_training a = use_auxiliary_loss a = num_queries a = num_channels a = min_size a = max_size a = num_labels a = mask_feature_size def A ( self : Union[str, Any] ) -> Dict: """simple docstring""" a = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __lowerCAmelCase ) a = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowerCAmelCase ) a = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowerCAmelCase ) > 0.5 ).float() a = (torch.rand((self.batch_size, self.num_labels) , device=__lowerCAmelCase ) > 0.5).long() a = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def A ( self : str ) -> Any: """simple docstring""" return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def A ( self : Union[str, Any] ) -> Any: """simple docstring""" a , a , a , a , a = self.prepare_config_and_inputs() a = {"pixel_values": pixel_values, "pixel_mask": pixel_mask} return config, inputs_dict def A ( self : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Dict ) -> str: """simple docstring""" a = output.encoder_hidden_states a = output.pixel_decoder_hidden_states a = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowerCAmelCase ) , config.decoder_config.decoder_layers ) def A ( self : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str]=False ) -> Tuple: """simple docstring""" with torch.no_grad(): a = MaskFormerModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase ) a = model(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) # the correct shape of 
output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(__lowerCAmelCase , __lowerCAmelCase ) def A ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] ) -> Optional[int]: """simple docstring""" a = MaskFormerForInstanceSegmentation(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() def comm_check_on_output(__lowerCAmelCase : Tuple ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): a = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase ) a = model(__lowerCAmelCase ) comm_check_on_output(__lowerCAmelCase ) a = model( pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ) comm_check_on_output(__lowerCAmelCase ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ): _UpperCAmelCase = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () _UpperCAmelCase = ( {'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False def A ( self : List[str] ) -> List[Any]: """simple docstring""" a = MaskFormerModelTester(self ) a = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase ) def A ( self : Any ) -> List[str]: """simple docstring""" self.config_tester.run_common_tests() def A ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" a , a = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) def A ( self : int ) -> int: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowerCAmelCase ) @unittest.skip(reason="MaskFormer does not use inputs_embeds" ) def A ( self : List[Any] ) -> Optional[Any]: """simple docstring""" pass @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" ) def A ( self : str ) -> Union[str, Any]: """simple docstring""" pass @unittest.skip(reason="MaskFormer is not 
a generative model" ) def A ( self : Tuple ) -> Optional[Any]: """simple docstring""" pass @unittest.skip(reason="MaskFormer does not use token embeddings" ) def A ( self : Tuple ) -> Optional[Any]: """simple docstring""" pass @require_torch_multi_gpu @unittest.skip( reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def A ( self : Optional[int] ) -> List[str]: """simple docstring""" pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def A ( self : List[str] ) -> Any: """simple docstring""" pass def A ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" a , a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = model_class(__lowerCAmelCase ) a = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a = [*signature.parameters.keys()] a = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCAmelCase ) @slow def A ( self : Tuple ) -> List[Any]: """simple docstring""" for model_name in ["facebook/maskformer-swin-small-coco"]: a = MaskFormerModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) def A ( self : str ) -> Dict: """simple docstring""" a = (self.model_tester.min_size,) * 2 a = { "pixel_values": torch.randn((2, 3, *size) , device=__lowerCAmelCase ), "mask_labels": torch.randn((2, 10, *size) , device=__lowerCAmelCase ), "class_labels": torch.zeros(2 , 10 , device=__lowerCAmelCase ).long(), } a = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowerCAmelCase ) a = model(**__lowerCAmelCase ) self.assertTrue(outputs.loss is not None ) def A ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" a , a = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) def A ( self : List[str] ) -> Any: """simple docstring""" a , a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = model_class(__lowerCAmelCase ).to(__lowerCAmelCase ) a = model(**__lowerCAmelCase , output_attentions=__lowerCAmelCase ) self.assertTrue(outputs.attentions is not None ) def A ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss a = self.all_model_classes[1] a , a , a , a , a = self.model_tester.prepare_config_and_inputs() a = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() a = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ).loss loss.backward() def A ( self : List[str] ) -> Union[str, Any]: """simple docstring""" a = self.all_model_classes[1] a , a , a , a , a = self.model_tester.prepare_config_and_inputs() a = True a = True a = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() a = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ) a = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() a = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't a = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() a = outputs.attentions[0] attentions.retain_grad() 
outputs.loss.backward(retain_graph=__lowerCAmelCase ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) A_ : int = 1E-4 def UpperCAmelCase__ ( ): '''simple docstring''' a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_vision @slow class _lowercase ( unittest.TestCase ): @cached_property def A ( self : int ) -> Optional[int]: """simple docstring""" return ( MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" ) if is_vision_available() else None ) def A ( self : List[Any] ) -> Optional[Any]: """simple docstring""" a = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(__lowerCAmelCase ) a = self.default_image_processor a = prepare_img() a = image_processor(__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase ) a = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) ) with torch.no_grad(): a = model(**__lowerCAmelCase ) a = torch.tensor( [[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) a = torch.tensor( [[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) a = torch.tensor( [[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def A ( self : str ) -> Union[str, Any]: """simple docstring""" a = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" ) .to(__lowerCAmelCase ) .eval() ) a = self.default_image_processor a = prepare_img() a = image_processor(__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase ) a = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) ) with torch.no_grad(): a = model(**__lowerCAmelCase ) # masks_queries_logits a = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) a = [ [-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3], [-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5], [-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2], ] a = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) # class_queries_logits a = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) a = torch.tensor( [ [1.65_12E00, -5.25_72E00, -3.35_19E00], [3.61_69E-02, -5.90_25E00, -2.93_13E00], [1.07_66E-04, -7.76_30E00, 
-5.12_63E00], ] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def A ( self : List[Any] ) -> Any: """simple docstring""" a = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" ) .to(__lowerCAmelCase ) .eval() ) a = self.default_image_processor a = prepare_img() a = image_processor(__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase ) a = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) ) with torch.no_grad(): a = model(**__lowerCAmelCase ) # masks_queries_logits a = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) a = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]] a = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) # class_queries_logits a = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) a = torch.tensor( [[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def A ( self : int ) -> Any: """simple docstring""" a = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" ) .to(__lowerCAmelCase ) .eval() ) a = self.default_image_processor a = image_processor( [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="pt" , ) a = inputs["pixel_values"].to(__lowerCAmelCase ) a = [el.to(__lowerCAmelCase ) for el in inputs["mask_labels"]] a = [el.to(__lowerCAmelCase ) for el in inputs["class_labels"]] with torch.no_grad(): a = model(**__lowerCAmelCase ) self.assertTrue(outputs.loss is not None )
32
0
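# A minimal, self-contained inference sketch for the MaskFormer checkpoint the
# tests above exercise. It assumes the public `transformers` classes
# MaskFormerForInstanceSegmentation and MaskFormerImageProcessor and the
# "facebook/maskformer-swin-small-coco" checkpoint named in the tests; it is an
# illustration, not part of the test suite.
import torch
from PIL import Image
from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor

processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco").eval()

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")  # the fixture used above
inputs = processor(image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# per-query mask logits (batch, num_queries, h/4, w/4) and class logits
# (batch, num_queries, num_labels + 1), matching the shapes asserted above
print(outputs.masks_queries_logits.shape, outputs.class_queries_logits.shape)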
def solution(n: int = 1000) -> int:
    """Count the first `n` expansions of the continued fraction for sqrt(2)
    whose numerator has more digits than the denominator (Project Euler 57)."""
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)


if __name__ == "__main__":
    print(f"{solution() = }")
720
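# A quick cross-check of the sqrt(2) expansion recurrence used above, assuming
# only Python's standard `fractions` module: the k-th expansion is
# 1 + 1/(2 + 1/(2 + ...)), and the 8th, 1393/985, is the first whose numerator
# has more digits than its denominator, so solution() counts it.
from fractions import Fraction

def convergent(k: int) -> Fraction:
    # build 1 + 1/(2 + 1/(2 + ...)) with k twos, from the inside out
    value = Fraction(2)
    for _ in range(k - 1):
        value = 2 + 1 / value
    return 1 + 1 / value

assert convergent(1) == Fraction(3, 2)
assert convergent(8) == Fraction(1393, 985)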
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class DisjunctiveConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # the constraint must be built from plain (nested) lists of ints, not tensors
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # no option may be a complete prefix of another option
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
32
0
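# A hedged usage sketch for DisjunctiveConstraint in constrained beam search.
# It assumes the `constraints=` argument of `model.generate` available in
# recent `transformers` releases; the t5-small checkpoint and the forced words
# are arbitrary examples, not part of the tests above.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.generation import DisjunctiveConstraint

tokenizer = AutoTokenizer.from_pretrained("t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

# force the output to contain at least one of these surface forms
options = [
    tokenizer("Katze", add_special_tokens=False).input_ids,
    tokenizer("Kater", add_special_tokens=False).input_ids,
]
constraint = DisjunctiveConstraint(options)

inputs = tokenizer("translate English to German: The cat sleeps.", return_tensors="pt")
output_ids = model.generate(**inputs, constraints=[constraint], num_beams=4)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))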
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser

from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser


def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
721
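# A self-contained sketch of the subcommand pattern the CLI above relies on,
# with hypothetical command names: each helper registers a sub-parser and sets
# a `func` default that main() later dispatches on.
from argparse import ArgumentParser

def greet_command(args):
    print(f"hello, {args.name}")

def greet_command_parser(subparsers):
    parser = subparsers.add_parser("greet", help="print a greeting")
    parser.add_argument("--name", default="world")
    parser.set_defaults(func=greet_command)

parser = ArgumentParser("demo", usage="demo <command> [<args>]")
subparsers = parser.add_subparsers(help="demo command helpers")
greet_command_parser(subparsers)

args = parser.parse_args(["greet", "--name", "accelerate"])
args.func(args)  # prints: hello, accelerate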
from __future__ import annotations


def is_9_pandigital(number: int) -> bool:
    """Return True if `number` uses each of the digits 1-9 exactly once."""
    base_number = str(number)
    return len(base_number) == 9 and set(base_number) == set("123456789")


def solution() -> int | None:
    """Find the largest 1-9 pandigital number formed as a concatenated product
    (Project Euler 38)."""
    for base_num in range(9999, 4999, -1):
        # candidate = concat(n, 2n) for a 4-digit n, i.e. n * 100002
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(333, 99, -1):
        # candidate = concat(n, 2n, 3n) for a 3-digit n, i.e. n * 1002003
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
32
0
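# A slower but more direct cross-check of the narrowed search above, assuming
# nothing beyond the problem statement: concatenate n*1, n*2, ... until at
# least nine digits and keep the largest 9-pandigital result.
def concatenated_product(n: int) -> str:
    digits = ""
    multiplier = 1
    while len(digits) < 9:
        digits += str(n * multiplier)
        multiplier += 1
    return digits

best = 0
for n in range(1, 10_000):  # n has at most 4 digits, so at least two terms are concatenated
    product = concatenated_product(n)
    if len(product) == 9 and set(product) == set("123456789"):
        best = max(best, int(product))
print(best)  # 932718654, matching solution()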
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert import BertTokenizer A_ : Optional[Any] = logging.get_logger(__name__) A_ : List[Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} A_ : str = { '''vocab_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } A_ : List[str] = { '''vocab_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } A_ : Optional[Any] = { '''vocab_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json''' ), }, } A_ : str = { '''facebook/dpr-ctx_encoder-single-nq-base''': 5_12, '''facebook/dpr-ctx_encoder-multiset-base''': 5_12, } A_ : Dict = { '''facebook/dpr-question_encoder-single-nq-base''': 5_12, '''facebook/dpr-question_encoder-multiset-base''': 5_12, } A_ : int = { '''facebook/dpr-reader-single-nq-base''': 5_12, '''facebook/dpr-reader-multiset-base''': 5_12, } A_ : Optional[Any] = { '''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True}, } A_ : int = { '''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True}, } A_ : int = { '''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True}, } class _lowercase ( UpperCAmelCase__ ): _UpperCAmelCase = VOCAB_FILES_NAMES _UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION class _lowercase ( UpperCAmelCase__ ): _UpperCAmelCase = 
VOCAB_FILES_NAMES _UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION A_ : Optional[int] = collections.namedtuple( '''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text'''] ) A_ : Union[str, Any] = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits''']) A_ : List[Any] = r''' Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: ``` [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> ``` Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. 
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `\'tf\'`: Return TensorFlow `tf.constant` objects. - `\'pt\'`: Return PyTorch `torch.Tensor` objects. - `\'np\'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer\'s default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Returns: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. ''' @add_start_docstrings(UpperCAmelCase__ ) class _lowercase : def __call__( self : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[str] = None , __lowerCAmelCase : Optional[str] = None , __lowerCAmelCase : Union[bool, str] = False , __lowerCAmelCase : Union[bool, str] = False , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , __lowerCAmelCase : Optional[bool] = None , **__lowerCAmelCase : int , ) -> BatchEncoding: """simple docstring""" if titles is None and texts is None: return super().__call__( __lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , return_tensors=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , **__lowerCAmelCase , ) elif titles is None or texts is None: a = titles if texts is None else texts return super().__call__( __lowerCAmelCase , __lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , return_tensors=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , **__lowerCAmelCase , ) a = titles if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) else [titles] a = texts if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) else [texts] a = len(__lowerCAmelCase ) a = questions if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) else [questions] * n_passages if len(__lowerCAmelCase ) != len(__lowerCAmelCase ): raise ValueError( f"""There should be as many titles than texts but got {len(__lowerCAmelCase )} titles and {len(__lowerCAmelCase )} texts.""" ) a = super().__call__(__lowerCAmelCase , __lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase )["input_ids"] a = super().__call__(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase )["input_ids"] a = { "input_ids": [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(__lowerCAmelCase , __lowerCAmelCase ) ] } if return_attention_mask is not False: a = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) a = attention_mask return self.pad(__lowerCAmelCase , padding=__lowerCAmelCase , max_length=__lowerCAmelCase , return_tensors=__lowerCAmelCase ) def A ( self 
: Optional[int] , __lowerCAmelCase : BatchEncoding , __lowerCAmelCase : DPRReaderOutput , __lowerCAmelCase : int = 16 , __lowerCAmelCase : int = 64 , __lowerCAmelCase : int = 4 , ) -> List[DPRSpanPrediction]: """simple docstring""" a = reader_input["input_ids"] a , a , a = reader_output[:3] a = len(__lowerCAmelCase ) a = sorted(range(__lowerCAmelCase ) , reverse=__lowerCAmelCase , key=relevance_logits.__getitem__ ) a = [] for doc_id in sorted_docs: a = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence a = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: a = sequence_ids.index(self.pad_token_id ) else: a = len(__lowerCAmelCase ) a = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=__lowerCAmelCase , top_spans=__lowerCAmelCase , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=__lowerCAmelCase , start_index=__lowerCAmelCase , end_index=__lowerCAmelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(__lowerCAmelCase ) >= num_spans: break return nbest_spans_predictions[:num_spans] def A ( self : List[str] , __lowerCAmelCase : List[int] , __lowerCAmelCase : List[int] , __lowerCAmelCase : int , __lowerCAmelCase : int , ) -> List[DPRSpanPrediction]: """simple docstring""" a = [] for start_index, start_score in enumerate(__lowerCAmelCase ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) a = sorted(__lowerCAmelCase , key=lambda __lowerCAmelCase : x[1] , reverse=__lowerCAmelCase ) a = [] for (start_index, end_index), score in scores: if start_index > end_index: raise ValueError(f"""Wrong span indices: [{start_index}:{end_index}]""" ) a = end_index - start_index + 1 if length > max_answer_length: raise ValueError(f"""Span is too long: {length} > {max_answer_length}""" ) if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(__lowerCAmelCase ) == top_spans: break return chosen_span_intervals @add_end_docstrings(UpperCAmelCase__ ) class _lowercase ( UpperCAmelCase__, UpperCAmelCase__ ): _UpperCAmelCase = VOCAB_FILES_NAMES _UpperCAmelCase = READER_PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase = READER_PRETRAINED_INIT_CONFIGURATION _UpperCAmelCase = ['''input_ids''', '''attention_mask''']
700
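# A hedged end-to-end sketch for the reader tokenizer defined above, assuming
# the public `transformers` classes DPRReader and DPRReaderTokenizer and the
# "facebook/dpr-reader-single-nq-base" checkpoint referenced in this file.
import torch
from transformers import DPRReader, DPRReaderTokenizer

tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base").eval()

encoded = tokenizer(
    questions=["What is love?"],
    titles=["Haddaway"],
    texts=["'What Is Love' is a song recorded by the artist Haddaway."],
    return_tensors="pt",
)
with torch.no_grad():
    outputs = model(**encoded)

# rank answer spans with the post-processing implemented above
best_spans = tokenizer.decode_best_spans(encoded, outputs)
print(best_spans[0].text, best_spans[0].relevance_score)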
import os
import tempfile
import unittest

from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property


@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
32
0
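# A hedged standalone sketch of the conversion flow the test above exercises;
# it assumes a local Tatoeba-Challenge checkout at DEFAULT_REPO, which the
# skip guard checks for, and network access for the model download.
import tempfile
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import TatoebaConverter

converter = TatoebaConverter(save_dir=tempfile.mkdtemp())
converter.convert_models(["heb-eng"])  # downloads and converts the Hebrew->English Marian model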
import gc import unittest import numpy as np import torch from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class _lowercase ( UpperCAmelCase__, unittest.TestCase ): _UpperCAmelCase = DanceDiffusionPipeline _UpperCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS _UpperCAmelCase = PipelineTesterMixin.required_optional_params - { '''callback''', '''latents''', '''callback_steps''', '''output_type''', '''num_images_per_prompt''', } _UpperCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS _UpperCAmelCase = False _UpperCAmelCase = False def A ( self : Union[str, Any] ) -> int: """simple docstring""" torch.manual_seed(0 ) a = UNetaDModel( block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=1_6000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=__lowerCAmelCase , use_timestep_embedding=__lowerCAmelCase , time_embedding_type="fourier" , mid_block_type="UNetMidBlock1D" , down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , ) a = IPNDMScheduler() a = { "unet": unet, "scheduler": scheduler, } return components def A ( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int]=0 ) -> Tuple: """simple docstring""" if str(__lowerCAmelCase ).startswith("mps" ): a = torch.manual_seed(__lowerCAmelCase ) else: a = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase ) a = { "batch_size": 1, "generator": generator, "num_inference_steps": 4, } return inputs def A ( self : Tuple ) -> List[Any]: """simple docstring""" a = "cpu" # ensure determinism for the device-dependent torch.Generator a = self.get_dummy_components() a = DanceDiffusionPipeline(**__lowerCAmelCase ) a = pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) a = self.get_dummy_inputs(__lowerCAmelCase ) a = pipe(**__lowerCAmelCase ) a = output.audios a = audio[0, -3:, -3:] assert audio.shape == (1, 2, components["unet"].sample_size) a = np.array([-0.7_2_6_5, 1.0_0_0_0, -0.8_3_8_8, 0.1_1_7_5, 0.9_4_9_8, -1.0_0_0_0] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def A ( self : Dict ) -> List[str]: """simple docstring""" return super().test_save_load_local() @skip_mps def A ( self : int ) -> List[str]: """simple docstring""" return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) @skip_mps def A ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" return super().test_save_load_optional_components() @skip_mps def A ( self : List[Any] ) -> List[Any]: """simple docstring""" return super().test_attention_slicing_forward_pass() def A ( self : Tuple ) -> Union[str, Any]: """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class _lowercase ( unittest.TestCase ): def A ( self : Any ) -> Any: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def A ( self : Tuple ) -> List[Any]: """simple docstring""" a = torch_device a = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" ) a = pipe.to(__lowerCAmelCase ) 
pipe.set_progress_bar_config(disable=__lowerCAmelCase ) a = torch.manual_seed(0 ) a = pipe(generator=__lowerCAmelCase , num_inference_steps=100 , audio_length_in_s=4.0_9_6 ) a = output.audios a = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) a = np.array([-0.0_1_9_2, -0.0_2_3_1, -0.0_3_1_8, -0.0_0_5_9, 0.0_0_0_2, -0.0_0_2_0] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2 def A ( self : Optional[Any] ) -> Any: """simple docstring""" a = torch_device a = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" , torch_dtype=torch.floataa ) a = pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) a = torch.manual_seed(0 ) a = pipe(generator=__lowerCAmelCase , num_inference_steps=100 , audio_length_in_s=4.0_9_6 ) a = output.audios a = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) a = np.array([-0.0_3_6_7, -0.0_4_8_8, -0.0_7_7_1, -0.0_5_2_5, -0.0_4_4_4, -0.0_3_4_1] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
701
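# A minimal generation sketch matching the slow tests above. It assumes the
# `diffusers` DanceDiffusionPipeline API and the "harmonai/maestro-150k"
# checkpoint the tests load; writing a WAV additionally assumes scipy.
import torch
from diffusers import DanceDiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k").to(device)

generator = torch.manual_seed(0)
audio = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096).audios[0]

# audio has shape (channels, samples); the sample rate is assumed to live on
# the UNet config, as it does for the dummy UNet constructed in the fast tests
import scipy.io.wavfile
scipy.io.wavfile.write("maestro_sample.wav", pipe.unet.config.sample_rate, audio.T)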
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}


class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
32
0
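# A short configuration sketch, assuming the class above corresponds to the
# upstream `LiltConfig` exported from `transformers` and pairing it with
# `LiltModel`; purely illustrative.
from transformers import LiltConfig, LiltModel

config = LiltConfig(channel_shrink_ratio=4, max_2d_position_embeddings=1024)
model = LiltModel(config)
print(config.model_type)  # "lilt"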
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class _lowercase : def __init__( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict=13 , __lowerCAmelCase : Optional[Any]=7 , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : str=True , __lowerCAmelCase : str=True , __lowerCAmelCase : List[Any]=99 , __lowerCAmelCase : str=32 , __lowerCAmelCase : int=5 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : List[str]=37 , __lowerCAmelCase : List[str]="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Optional[int]=512 , __lowerCAmelCase : Dict=16 , __lowerCAmelCase : Dict=2 , __lowerCAmelCase : Optional[Any]=0.0_2 , __lowerCAmelCase : str=3 , __lowerCAmelCase : Tuple=4 , __lowerCAmelCase : Dict=None , ) -> Optional[int]: """simple docstring""" a = parent a = batch_size a = seq_length a = is_training a = use_token_type_ids a = use_labels a = vocab_size a = hidden_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = type_vocab_size a = type_sequence_label_size a = initializer_range a = num_labels a = num_choices a = scope a = self.vocab_size - 1 def A ( self : Dict ) -> str: """simple docstring""" a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a = None if self.use_token_type_ids: a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a = None a = None a = None if self.use_labels: a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a = ids_tensor([self.batch_size] , self.num_choices ) a = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) a = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def A ( self : Optional[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , *__lowerCAmelCase : Tuple ) -> str: """simple docstring""" a = OpenAIGPTModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase , head_mask=__lowerCAmelCase ) a = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) a = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A ( self : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , *__lowerCAmelCase : Optional[int] ) -> List[str]: """simple docstring""" a = OpenAIGPTLMHeadModel(__lowerCAmelCase ) model.to(__lowerCAmelCase ) 
model.eval() a = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A ( self : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , *__lowerCAmelCase : List[str] ) -> Any: """simple docstring""" a = OpenAIGPTDoubleHeadsModel(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A ( self : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , *__lowerCAmelCase : Dict ) -> Tuple: """simple docstring""" a = self.num_labels a = OpenAIGPTForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : List[str] ) -> Union[str, Any]: """simple docstring""" a = self.prepare_config_and_inputs() ( ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ) = config_and_inputs a = { "input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask, } return config, inputs_dict @require_torch class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ): _UpperCAmelCase = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) _UpperCAmelCase = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly _UpperCAmelCase = ( { '''feature-extraction''': OpenAIGPTModel, '''text-classification''': OpenAIGPTForSequenceClassification, '''text-generation''': OpenAIGPTLMHeadModel, '''zero-shot''': OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def A ( self : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] ) -> Optional[int]: """simple docstring""" if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. 
return True return False def A ( self : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : List[str]=False ) -> Union[str, Any]: """simple docstring""" a = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": a = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCAmelCase , ) a = inputs_dict["labels"] a = inputs_dict["labels"] a = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=__lowerCAmelCase , ) a = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase ) return inputs_dict def A ( self : List[str] ) -> str: """simple docstring""" a = OpenAIGPTModelTester(self ) a = ConfigTester(self , config_class=__lowerCAmelCase , n_embd=37 ) def A ( self : Optional[Any] ) -> Dict: """simple docstring""" self.config_tester.run_common_tests() def A ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*__lowerCAmelCase ) def A ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*__lowerCAmelCase ) def A ( self : Tuple ) -> Dict: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*__lowerCAmelCase ) def A ( self : int ) -> List[Any]: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*__lowerCAmelCase ) @slow def A ( self : Optional[Any] ) -> Tuple: """simple docstring""" for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a = OpenAIGPTModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @require_torch class _lowercase ( unittest.TestCase ): @slow def A ( self : Optional[int] ) -> Dict: """simple docstring""" a = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" ) model.to(__lowerCAmelCase ) a = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=__lowerCAmelCase ) # the president is a = [ 481, 4735, 544, 246, 963, 870, 762, 239, 244, 4_0477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the a = model.generate(__lowerCAmelCase , do_sample=__lowerCAmelCase ) self.assertListEqual(output_ids[0].tolist() , __lowerCAmelCase )
702
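# A hedged re-run of the greedy-generation integration check above as plain
# usage code, assuming the public "openai-gpt" checkpoint and its tokenizer.
import torch
from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer

tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt").eval()

input_ids = tokenizer("the president is", return_tensors="pt").input_ids  # [[481, 4735, 544]]
output_ids = model.generate(input_ids, do_sample=False)  # greedy decoding, as in the test
print(tokenizer.decode(output_ids[0]))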
import argparse

from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
32
0
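# An invocation sketch for the conversion script above; the script filename
# and all paths are placeholders, not files shipped with any checkpoint:
#
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/t5/model.ckpt \
#       --config_file /path/to/t5/config.json \
#       --pytorch_dump_path /path/to/pytorch_model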