code
stringlengths
81
54k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
'''simple docstring''' def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : int = [False] * len(lowerCAmelCase_ ) _snake_case : Tuple = [] queue.append(lowerCAmelCase_ ) _snake_case : Any = True while queue: _snake_case : Union[str, Any] = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(lowerCAmelCase_ ) _snake_case : Optional[Any] = True _snake_case : List[str] = u return visited[t] def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[Any] = [-1] * (len(lowerCAmelCase_ )) _snake_case : List[str] = 0 while bfs(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case : Optional[Any] = float('''Inf''' ) _snake_case : List[str] = sink while s != source: # Find the minimum value in select path _snake_case : Optional[int] = min(lowerCAmelCase_ , graph[parent[s]][s] ) _snake_case : Union[str, Any] = parent[s] max_flow += path_flow _snake_case : Optional[int] = sink while v != source: _snake_case : Optional[Any] = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow _snake_case : List[Any] = parent[v] return max_flow UpperCAmelCase : str = [ [0, 1_6, 1_3, 0, 0, 0], [0, 0, 1_0, 1_2, 0, 0], [0, 4, 0, 0, 1_4, 0], [0, 0, 9, 0, 0, 2_0], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] UpperCAmelCase, UpperCAmelCase : Union[str, Any] = 0, 5 print(ford_fulkerson(graph, source, sink))
47
'''simple docstring''' import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version UpperCAmelCase : Tuple = logging.getLogger(__name__) require_version('pytorch_lightning>=1.0.4') UpperCAmelCase : str = { 'base': AutoModel, 'sequence-classification': AutoModelForSequenceClassification, 'question-answering': AutoModelForQuestionAnswering, 'pretraining': AutoModelForPreTraining, 'token-classification': AutoModelForTokenClassification, 'language-modeling': AutoModelWithLMHead, 'summarization': AutoModelForSeqaSeqLM, 'translation': AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization UpperCAmelCase : Optional[Any] = { 'linear': get_linear_schedule_with_warmup, 'cosine': get_cosine_schedule_with_warmup, 'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup, 'polynomial': get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } UpperCAmelCase : Tuple = sorted(arg_to_scheduler.keys()) UpperCAmelCase : Optional[Any] = '{' + ', '.join(arg_to_scheduler_choices) + '}' class lowerCamelCase (pl.LightningModule ): def __init__( self , lowercase__ , lowercase__=None , lowercase__="base" , lowercase__=None , lowercase__=None , 
lowercase__=None , **lowercase__ , ) -> Optional[int]: """simple docstring""" super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(lowercase__ ) _snake_case : Union[str, Any] = 0 _snake_case : int = Path(self.hparams.output_dir ) _snake_case : int = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: _snake_case : Tuple = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'''num_labels''': num_labels} if num_labels is not None else {}) , cache_dir=lowercase__ , **lowercase__ , ) else: _snake_case : PretrainedConfig = config _snake_case : Optional[Any] = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''') for p in extra_model_params: if getattr(self.hparams , lowercase__ , lowercase__ ): assert hasattr(self.config , lowercase__ ), F'''model config doesn\'t have a `{p}` attribute''' setattr(self.config , lowercase__ , getattr(self.hparams , lowercase__ ) ) if tokenizer is None: _snake_case : Optional[int] = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=lowercase__ , ) else: _snake_case : PreTrainedTokenizer = tokenizer _snake_case : Any = MODEL_MODES[mode] if model is None: _snake_case : List[Any] = self.model_type.from_pretrained( self.hparams.model_name_or_path , from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=lowercase__ , ) else: _snake_case : Optional[Any] = model def UpperCAmelCase_ ( self , *lowercase__ , **lowercase__ ) -> List[str]: """simple docstring""" _snake_case : Dict = self.model_type.from_pretrained(*lowercase__ , **lowercase__ ) def UpperCAmelCase_ ( self ) -> List[Any]: """simple docstring""" _snake_case : Optional[int] = 
arg_to_scheduler[self.hparams.lr_scheduler] _snake_case : Optional[int] = get_schedule_func( self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() ) _snake_case : str = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1} return scheduler def UpperCAmelCase_ ( self ) -> int: """simple docstring""" _snake_case : Any = self.model _snake_case : List[Any] = ['''bias''', '''LayerNorm.weight'''] _snake_case : List[str] = [ { '''params''': [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check this named paramters '''weight_decay''': self.hparams.weight_decay, }, { '''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0, }, ] if self.hparams.adafactor: _snake_case : Any = Adafactor( lowercase__ , lr=self.hparams.learning_rate , scale_parameter=lowercase__ , relative_step=lowercase__ ) else: _snake_case : List[str] = AdamW( lowercase__ , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon ) _snake_case : List[str] = optimizer _snake_case : Any = self.get_lr_scheduler() return [optimizer], [scheduler] def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Any: """simple docstring""" return self.validation_step(lowercase__ , lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ ) -> Tuple: """simple docstring""" return self.validation_end(lowercase__ ) def UpperCAmelCase_ ( self ) -> int: """simple docstring""" _snake_case : Any = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores _snake_case : Optional[int] = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def UpperCAmelCase_ ( self , lowercase__ ) -> Any: """simple docstring""" if stage == "test": _snake_case : Any = len(self.test_dataloader().dataset ) else: _snake_case : Dict = self.get_dataloader('''train''' , 
self.hparams.train_batch_size , shuffle=lowercase__ ) _snake_case : Optional[int] = len(self.train_dataloader().dataset ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ = False ) -> str: """simple docstring""" raise NotImplementedError('''You must implement this for your task''' ) def UpperCAmelCase_ ( self ) -> Optional[int]: """simple docstring""" return self.train_loader def UpperCAmelCase_ ( self ) -> Dict: """simple docstring""" return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=lowercase__ ) def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ ) -> Optional[int]: """simple docstring""" return os.path.join( self.hparams.data_dir , '''cached_{}_{}_{}'''.format( lowercase__ , list(filter(lowercase__ , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , ) @pl.utilities.rank_zero_only def UpperCAmelCase_ ( self , lowercase__ ) -> None: """simple docstring""" _snake_case : Dict = self.output_dir.joinpath('''best_tfmr''' ) _snake_case : Tuple = self.step_count self.model.save_pretrained(lowercase__ ) self.tokenizer.save_pretrained(lowercase__ ) @staticmethod def UpperCAmelCase_ ( lowercase__ , lowercase__ ) -> Tuple: """simple docstring""" parser.add_argument( '''--model_name_or_path''' , default=lowercase__ , type=lowercase__ , required=lowercase__ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--config_name''' , default='''''' , type=lowercase__ , help='''Pretrained config name or path if not the same as model_name''' ) parser.add_argument( '''--tokenizer_name''' , default=lowercase__ , type=lowercase__ , help='''Pretrained tokenizer name or path if not the same as model_name''' , ) parser.add_argument( '''--cache_dir''' , default=str(Path(lowercase__ 
).parent / '''test_run''' / '''cache''' ) , type=lowercase__ , help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , ) parser.add_argument( '''--encoder_layerdrop''' , type=lowercase__ , help='''Encoder layer dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--decoder_layerdrop''' , type=lowercase__ , help='''Decoder layer dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--dropout''' , type=lowercase__ , help='''Dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--attention_dropout''' , type=lowercase__ , help='''Attention dropout probability (Optional). Goes into model.config''' , ) parser.add_argument('''--learning_rate''' , default=5E-5 , type=lowercase__ , help='''The initial learning rate for Adam.''' ) parser.add_argument( '''--lr_scheduler''' , default='''linear''' , choices=lowercase__ , metavar=lowercase__ , type=lowercase__ , help='''Learning rate scheduler''' , ) parser.add_argument('''--weight_decay''' , default=0.0 , type=lowercase__ , help='''Weight decay if we apply some.''' ) parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=lowercase__ , help='''Epsilon for Adam optimizer.''' ) parser.add_argument('''--warmup_steps''' , default=0 , type=lowercase__ , help='''Linear warmup over warmup_steps.''' ) parser.add_argument('''--num_workers''' , default=4 , type=lowercase__ , help='''kwarg passed to DataLoader''' ) parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=lowercase__ ) parser.add_argument('''--train_batch_size''' , default=32 , type=lowercase__ ) parser.add_argument('''--eval_batch_size''' , default=32 , type=lowercase__ ) parser.add_argument('''--adafactor''' , action='''store_true''' ) class lowerCamelCase (pl.Callback ): def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> str: """simple docstring""" if ( trainer.is_global_zero and 
trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. class lowerCamelCase (pl.Callback ): def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> List[str]: """simple docstring""" for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(lowercase__ ) class lowerCamelCase (pl.Callback ): def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Any: """simple docstring""" _snake_case : Any = trainer.lr_schedulers[0]['''scheduler'''] _snake_case : Optional[int] = {F'''lr_group_{i}''': lr for i, lr in enumerate(lr_scheduler.get_lr() )} pl_module.logger.log_metrics(lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> List[str]: """simple docstring""" rank_zero_info('''***** Validation results *****''' ) _snake_case : Dict = trainer.callback_metrics # Log results for key in sorted(lowercase__ ): if key not in ["log", "progress_bar"]: rank_zero_info('''{} = {}\n'''.format(lowercase__ , str(metrics[key] ) ) ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Dict: """simple docstring""" rank_zero_info('''***** Test results *****''' ) _snake_case : Dict = trainer.callback_metrics # Log and save results to file _snake_case : str = os.path.join(pl_module.hparams.output_dir , '''test_results.txt''' ) with open(lowercase__ , '''w''' ) as writer: for key in sorted(lowercase__ ): if key not in ["log", "progress_bar"]: rank_zero_info('''{} = {}\n'''.format(lowercase__ , str(metrics[key] ) ) ) writer.write('''{} = {}\n'''.format(lowercase__ , str(metrics[key] ) ) ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" parser.add_argument( '''--output_dir''' , default=str(Path(lowerCAmelCase_ ).parent / '''test_run''' / '''model_checkpoints''' ) , type=lowerCAmelCase_ , help='''The output directory where the model predictions and 
checkpoints will be written.''' , ) parser.add_argument( '''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , ) parser.add_argument( '''--fp16_opt_level''' , type=lowerCAmelCase_ , default='''O2''' , help=( '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].''' '''See details at https://nvidia.github.io/apex/amp.html''' ) , ) parser.add_argument('''--n_tpu_cores''' , dest='''tpu_cores''' , type=lowerCAmelCase_ ) parser.add_argument('''--max_grad_norm''' , dest='''gradient_clip_val''' , default=1.0 , type=lowerCAmelCase_ , help='''Max gradient norm''' ) parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' ) parser.add_argument('''--do_predict''' , action='''store_true''' , help='''Whether to run predictions on the test set.''' ) parser.add_argument( '''--gradient_accumulation_steps''' , dest='''accumulate_grad_batches''' , type=lowerCAmelCase_ , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , ) parser.add_argument('''--seed''' , type=lowerCAmelCase_ , default=42 , help='''random seed for initialization''' ) parser.add_argument( '''--data_dir''' , default=str(Path(lowerCAmelCase_ ).parent / '''test_run''' / '''dummy-train-data''' ) , type=lowerCAmelCase_ , help='''The input data dir. 
Should contain the training files for the CoNLL-2003 NER task.''' , ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=True , lowerCAmelCase_=[] , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ , ): """simple docstring""" pl.seed_everything(args.seed ) # init model _snake_case : Union[str, Any] = Path(model.hparams.output_dir ) odir.mkdir(exist_ok=lowerCAmelCase_ ) # add custom checkpoints if checkpoint_callback is None: _snake_case : Any = pl.callbacks.ModelCheckpoint( filepath=args.output_dir , prefix='''checkpoint''' , monitor='''val_loss''' , mode='''min''' , save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(lowerCAmelCase_ ) if logging_callback is None: _snake_case : str = LoggingCallback() _snake_case : Tuple = {} if args.fpaa: _snake_case : Union[str, Any] = 16 if args.gpus > 1: _snake_case : Optional[Any] = '''auto''' _snake_case : Tuple = '''ddp''' _snake_case : Optional[Any] = args.accumulate_grad_batches _snake_case : Tuple = None _snake_case : str = '''auto''' _snake_case : int = pl.Trainer.from_argparse_args( lowerCAmelCase_ , weights_summary=lowerCAmelCase_ , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=lowerCAmelCase_ , val_check_interval=1 , num_sanity_val_steps=2 , **lowerCAmelCase_ , ) if args.do_train: trainer.fit(lowerCAmelCase_ ) else: print('''RAG modeling tests with new set functions successfuly executed!''' ) return trainer
47
1
'''simple docstring''' import numpy # List of input, output pairs UpperCAmelCase : List[Any] = ( ((5, 2, 3), 1_5), ((6, 5, 9), 2_5), ((1_1, 1_2, 1_3), 4_1), ((1, 1, 1), 8), ((1_1, 1_2, 1_3), 4_1), ) UpperCAmelCase : Optional[int] = (((5_1_5, 2_2, 1_3), 5_5_5), ((6_1, 3_5, 4_9), 1_5_0)) UpperCAmelCase : str = [2, 4, 1, 5] UpperCAmelCase : int = len(train_data) UpperCAmelCase : List[str] = 0.0_09 def _a ( lowerCAmelCase_ , lowerCAmelCase_="train" ): """simple docstring""" return calculate_hypothesis_value(lowerCAmelCase_ , lowerCAmelCase_ ) - output( lowerCAmelCase_ , lowerCAmelCase_ ) def _a ( lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[Any] = 0 for i in range(len(lowerCAmelCase_ ) - 1 ): hyp_val += data_input_tuple[i] * parameter_vector[i + 1] hyp_val += parameter_vector[0] return hyp_val def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" if data_set == "train": return train_data[example_no][1] elif data_set == "test": return test_data[example_no][1] return None def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" if data_set == "train": return _hypothesis_value(train_data[example_no][0] ) elif data_set == "test": return _hypothesis_value(test_data[example_no][0] ) return None def _a ( lowerCAmelCase_ , lowerCAmelCase_=m ): """simple docstring""" _snake_case : Optional[int] = 0 for i in range(lowerCAmelCase_ ): if index == -1: summation_value += _error(lowerCAmelCase_ ) else: summation_value += _error(lowerCAmelCase_ ) * train_data[i][0][index] return summation_value def _a ( lowerCAmelCase_ ): """simple docstring""" _snake_case : List[str] = summation_of_cost_derivative(lowerCAmelCase_ , lowerCAmelCase_ ) / m return cost_derivative_value def _a ( ): """simple docstring""" global parameter_vector # Tune these values to set a tolerance value for predicted output _snake_case : str = 0.000_002 _snake_case : Optional[int] = 0 _snake_case : Optional[int] = 0 while True: j += 1 _snake_case : List[str] = [0, 0, 0, 0] 
for i in range(0 , len(lowerCAmelCase_ ) ): _snake_case : Optional[Any] = get_cost_derivative(i - 1 ) _snake_case : Tuple = ( parameter_vector[i] - LEARNING_RATE * cost_derivative ) if numpy.allclose( lowerCAmelCase_ , lowerCAmelCase_ , atol=lowerCAmelCase_ , rtol=lowerCAmelCase_ , ): break _snake_case : Any = temp_parameter_vector print(('''Number of iterations:''', j) ) def _a ( ): """simple docstring""" for i in range(len(lowerCAmelCase_ ) ): print(('''Actual output value:''', output(lowerCAmelCase_ , '''test''' )) ) print(('''Hypothesis output:''', calculate_hypothesis_value(lowerCAmelCase_ , '''test''' )) ) if __name__ == "__main__": run_gradient_descent() print('\nTesting gradient descent for a linear hypothesis function.\n') test_gradient_descent()
47
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase : List[str] = logging.get_logger(__name__) UpperCAmelCase : Dict = { 'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json', # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class lowerCamelCase (a__ ): _lowercase : List[str] = """sew-d""" def __init__( self , lowercase__=32 , lowercase__=768 , lowercase__=12 , lowercase__=12 , lowercase__=3_072 , lowercase__=2 , lowercase__=512 , lowercase__=256 , lowercase__=True , lowercase__=True , lowercase__=("p2c", "c2p") , lowercase__="layer_norm" , lowercase__="gelu_python" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.0 , lowercase__=0.1 , lowercase__=0.02 , lowercase__=1E-7 , lowercase__=1E-5 , lowercase__="group" , lowercase__="gelu" , lowercase__=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowercase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowercase__=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowercase__=False , lowercase__=128 , lowercase__=16 , lowercase__=True , lowercase__=0.05 , lowercase__=10 , lowercase__=2 , lowercase__=0.0 , lowercase__=10 , lowercase__=0 , lowercase__="mean" , lowercase__=False , lowercase__=False , lowercase__=256 , lowercase__=0 , lowercase__=1 , lowercase__=2 , **lowercase__ , ) -> Dict: """simple docstring""" super().__init__(**lowercase__ , pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ ) _snake_case : List[str] = hidden_size _snake_case : Optional[Any] = feat_extract_norm _snake_case : Tuple = feat_extract_activation _snake_case : Tuple = list(lowercase__ ) _snake_case : Any = list(lowercase__ ) _snake_case : Any = list(lowercase__ ) _snake_case : Any = conv_bias _snake_case : List[Any] = num_conv_pos_embeddings _snake_case : Any = num_conv_pos_embedding_groups _snake_case : Union[str, Any] = 
len(self.conv_dim ) _snake_case : Optional[Any] = num_hidden_layers _snake_case : Optional[int] = intermediate_size _snake_case : Any = squeeze_factor _snake_case : Optional[Any] = max_position_embeddings _snake_case : Tuple = position_buckets _snake_case : Tuple = share_att_key _snake_case : Any = relative_attention _snake_case : Optional[int] = norm_rel_ebd _snake_case : Optional[Any] = list(lowercase__ ) _snake_case : List[Any] = hidden_act _snake_case : List[Any] = num_attention_heads _snake_case : Dict = hidden_dropout _snake_case : Tuple = attention_dropout _snake_case : Union[str, Any] = activation_dropout _snake_case : List[Any] = feat_proj_dropout _snake_case : Optional[int] = final_dropout _snake_case : Optional[Any] = layer_norm_eps _snake_case : Dict = feature_layer_norm_eps _snake_case : List[Any] = initializer_range _snake_case : Dict = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect.''' '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,''' F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)''' F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _snake_case : Union[str, Any] = apply_spec_augment _snake_case : Any = mask_time_prob _snake_case : List[str] = mask_time_length _snake_case : Dict = mask_time_min_masks _snake_case : Union[str, Any] = mask_feature_prob _snake_case : Tuple = mask_feature_length _snake_case : Union[str, Any] = mask_feature_min_masks # ctc loss _snake_case : Optional[Any] = ctc_loss_reduction _snake_case : Optional[Any] = ctc_zero_infinity # sequence classification _snake_case : List[Any] = 
use_weighted_layer_sum _snake_case : Any = classifier_proj_size @property def UpperCAmelCase_ ( self ) -> Any: """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
47
1
'''simple docstring''' import unittest import numpy as np import torch from torch import nn from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import enable_full_determinism, skip_mps from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowerCamelCase (a__ , unittest.TestCase ): _lowercase : Optional[int] = KandinskyVaaPriorPipeline _lowercase : Optional[int] = ["""prompt"""] _lowercase : Union[str, Any] = ["""prompt""", """negative_prompt"""] _lowercase : Dict = [ """num_images_per_prompt""", """generator""", """num_inference_steps""", """latents""", """negative_prompt""", """guidance_scale""", """output_type""", """return_dict""", ] _lowercase : List[str] = False @property def UpperCAmelCase_ ( self ) -> Tuple: """simple docstring""" return 32 @property def UpperCAmelCase_ ( self ) -> Tuple: """simple docstring""" return 32 @property def UpperCAmelCase_ ( self ) -> List[str]: """simple docstring""" return self.time_input_dim @property def UpperCAmelCase_ ( self ) -> List[str]: """simple docstring""" return self.time_input_dim * 4 @property def UpperCAmelCase_ ( self ) -> Tuple: """simple docstring""" return 100 @property def UpperCAmelCase_ ( self ) -> str: """simple docstring""" _snake_case : Union[str, Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) return tokenizer @property def UpperCAmelCase_ ( self ) -> Union[str, Any]: """simple docstring""" torch.manual_seed(0 ) _snake_case : Tuple = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , 
pad_token_id=1 , vocab_size=1_000 , ) return CLIPTextModelWithProjection(lowercase__ ) @property def UpperCAmelCase_ ( self ) -> List[str]: """simple docstring""" torch.manual_seed(0 ) _snake_case : int = { '''num_attention_heads''': 2, '''attention_head_dim''': 12, '''embedding_dim''': self.text_embedder_hidden_size, '''num_layers''': 1, } _snake_case : str = PriorTransformer(**lowercase__ ) # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 _snake_case : int = nn.Parameter(torch.ones(model.clip_std.shape ) ) return model @property def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" torch.manual_seed(0 ) _snake_case : List[str] = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , ) _snake_case : List[str] = CLIPVisionModelWithProjection(lowercase__ ) return model @property def UpperCAmelCase_ ( self ) -> List[Any]: """simple docstring""" _snake_case : int = CLIPImageProcessor( crop_size=224 , do_center_crop=lowercase__ , do_normalize=lowercase__ , do_resize=lowercase__ , image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073] , image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711] , resample=3 , size=224 , ) return image_processor def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" _snake_case : Optional[int] = self.dummy_prior _snake_case : Dict = self.dummy_image_encoder _snake_case : List[str] = self.dummy_text_encoder _snake_case : Dict = self.dummy_tokenizer _snake_case : Dict = self.dummy_image_processor _snake_case : str = UnCLIPScheduler( variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1_000 , clip_sample=lowercase__ , clip_sample_range=10.0 , ) _snake_case : Union[str, Any] = { '''prior''': prior, 
'''image_encoder''': image_encoder, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''scheduler''': scheduler, '''image_processor''': image_processor, } return components def UpperCAmelCase_ ( self , lowercase__ , lowercase__=0 ) -> List[str]: """simple docstring""" if str(lowercase__ ).startswith('''mps''' ): _snake_case : List[str] = torch.manual_seed(lowercase__ ) else: _snake_case : Dict = torch.Generator(device=lowercase__ ).manual_seed(lowercase__ ) _snake_case : Any = { '''prompt''': '''horse''', '''generator''': generator, '''guidance_scale''': 4.0, '''num_inference_steps''': 2, '''output_type''': '''np''', } return inputs def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" _snake_case : Optional[int] = '''cpu''' _snake_case : Dict = self.get_dummy_components() _snake_case : Dict = self.pipeline_class(**lowercase__ ) _snake_case : Tuple = pipe.to(lowercase__ ) pipe.set_progress_bar_config(disable=lowercase__ ) _snake_case : Tuple = pipe(**self.get_dummy_inputs(lowercase__ ) ) _snake_case : Any = output.image_embeds _snake_case : Union[str, Any] = pipe( **self.get_dummy_inputs(lowercase__ ) , return_dict=lowercase__ , )[0] _snake_case : str = image[0, -10:] _snake_case : Any = image_from_tuple[0, -10:] assert image.shape == (1, 32) _snake_case : List[Any] = np.array( [-0.0_532, 1.7_120, 0.3_656, -1.0_852, -0.8_946, -1.1_756, 0.4_348, 0.2_482, 0.5_146, -0.1_156] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def UpperCAmelCase_ ( self ) -> int: """simple docstring""" _snake_case : int = torch_device == '''cpu''' _snake_case : List[str] = True _snake_case : List[str] = False self._test_inference_batch_single_identical( test_max_difference=lowercase__ , relax_max_difference=lowercase__ , test_mean_pixel_difference=lowercase__ , ) @skip_mps def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" 
_snake_case : int = torch_device == '''cpu''' _snake_case : List[str] = False self._test_attention_slicing_forward_pass( test_max_difference=lowercase__ , test_mean_pixel_difference=lowercase__ , )
47
'''simple docstring''' from random import randint from tempfile import TemporaryFile import numpy as np def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : List[Any] = 0 if start < end: _snake_case : List[Any] = randint(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Any = a[end] _snake_case : List[str] = a[pivot] _snake_case : Optional[int] = temp _snake_case , _snake_case : List[Any] = _in_place_partition(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) count += _in_place_quick_sort(lowerCAmelCase_ , lowerCAmelCase_ , p - 1 ) count += _in_place_quick_sort(lowerCAmelCase_ , p + 1 , lowerCAmelCase_ ) return count def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[Any] = 0 _snake_case : Optional[int] = randint(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Tuple = a[end] _snake_case : Optional[Any] = a[pivot] _snake_case : Union[str, Any] = temp _snake_case : Union[str, Any] = start - 1 for index in range(lowerCAmelCase_ , lowerCAmelCase_ ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value _snake_case : Optional[int] = new_pivot_index + 1 _snake_case : Optional[Any] = a[new_pivot_index] _snake_case : Tuple = a[index] _snake_case : str = temp _snake_case : Any = a[new_pivot_index + 1] _snake_case : str = a[end] _snake_case : Optional[int] = temp return new_pivot_index + 1, count UpperCAmelCase : Dict = TemporaryFile() UpperCAmelCase : Dict = 1_0_0 # 1000 elements are to be sorted UpperCAmelCase, UpperCAmelCase : str = 0, 1 # mean and standard deviation UpperCAmelCase : Optional[Any] = np.random.normal(mu, sigma, p) np.save(outfile, X) print('The array is') print(X) outfile.seek(0) # using the same array UpperCAmelCase : int = np.load(outfile) UpperCAmelCase : Optional[int] = len(M) - 1 UpperCAmelCase : str = _in_place_quick_sort(M, 0, r) print( 'No of Comparisons for 100 elements selected from a standard normal 
distribution' 'is :' ) print(z)
47
1
"""Test script for Accelerate's gradient accumulation, `no_sync`, and
`GradientState` dataloader tracking.

NOTE: in the distributed/mangled original every function was named `_a`
while call sites used the real names (`step_model`, `get_training_setup`,
...), and the LR-scheduler lambdas referenced an undefined `epoch`.  The
names are restored here so the script actually runs.
"""

from copy import deepcopy

import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader

from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed


def check_model_parameters(model_a, model_b, did_step, iteration):
    """Assert that gradients of `model_a`/`model_b` are in sync iff `did_step`."""
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"


def step_model(model, input, target, accelerator, do_backward=True):
    """Run one forward/backward step, either manually or through `accelerator.backward`."""
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        # Manual accumulation: scale the loss ourselves and call plain backward.
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)


def get_training_setup(accelerator, sched=False):
    """Return everything needed for basic training: a reference model, its
    accelerator-prepared copy, a dataloader, and (optionally) optimizers/schedulers."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader


def test_noop_sync(accelerator):
    """On a single process `no_sync` is a no-op: grads must always stay in sync."""
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_distributed_sync(accelerator):
    """With real DDP, grads are in sync only on iterations where `no_sync` was not used."""
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    """Check `accelerator.accumulate` only syncs grads on step boundaries."""
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()


def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    """Same as above, but also stepping an optimizer + LR scheduler and checking LRs match."""
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                # Without split_batches the scheduler is stepped once per process.
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()


def test_dataloader_break():
    """`GradientState` must track the currently-active (possibly nested) dataloader."""
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                # A nested dataloader temporarily becomes the active one.
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None


def main():
    """Dispatch the tests appropriate for the current distributed configuration."""
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # Entry point for TPU spawning (index is the process index, unused).
    main()


if __name__ == "__main__":
    main()
47
'''simple docstring''' from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
47
1
"""Generate README model cards for the four facebook/wmt19-* FSMT checkpoints.

NOTE: in the mangled original the function was named `_a` while the
module-level loop called `write_model_card(...)`, and `os.makedirs` received
a mangled `exist_ok` argument — both are restored here.
"""
# Usage:
# ./gen-card-facebook-wmt19.py

import os
from pathlib import Path


def write_model_card(model_card_dir, src_lang, tgt_lang):
    """Render the model card for the `src_lang`-`tgt_lang` pair and write it
    to `model_card_dir/README.md` (the directory is created if needed)."""
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---

# FSMT

## Model description

This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.

For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).

The abbreviation FSMT stands for FairSeqMachineTranslation

All four models are available:

* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)

## Intended uses & limitations

#### How to use

```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)

input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}

```

#### Limitations and bias

- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)

## Training data

Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).

## Eval results

pair   | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}

The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking

The score was calculated using this code:

```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.

## Data Sources

- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)


### BibTeX entry and citation info

```bibtex
@inproceedings{{...,
  year={{2020}},
  title={{Facebook FAIR's WMT19 News Translation Task Submission}},
  author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
  booktitle={{Proc. of WMT}},
}}
```


## TODO

- port model ensemble (fairseq uses 4 model checkpoints)

"""
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
47
'''simple docstring''' from argparse import ArgumentParser from .add_new_model import AddNewModelCommand from .add_new_model_like import AddNewModelLikeCommand from .convert import ConvertCommand from .download import DownloadCommand from .env import EnvironmentCommand from .lfs import LfsCommands from .pt_to_tf import PTtoTFCommand from .run import RunCommand from .serving import ServeCommand from .user import UserCommands def _a ( ): """simple docstring""" _snake_case : List[Any] = ArgumentParser('''Transformers CLI tool''' , usage='''transformers-cli <command> [<args>]''' ) _snake_case : List[str] = parser.add_subparsers(help='''transformers-cli command helpers''' ) # Register commands ConvertCommand.register_subcommand(lowerCAmelCase_ ) DownloadCommand.register_subcommand(lowerCAmelCase_ ) EnvironmentCommand.register_subcommand(lowerCAmelCase_ ) RunCommand.register_subcommand(lowerCAmelCase_ ) ServeCommand.register_subcommand(lowerCAmelCase_ ) UserCommands.register_subcommand(lowerCAmelCase_ ) AddNewModelCommand.register_subcommand(lowerCAmelCase_ ) AddNewModelLikeCommand.register_subcommand(lowerCAmelCase_ ) LfsCommands.register_subcommand(lowerCAmelCase_ ) PTtoTFCommand.register_subcommand(lowerCAmelCase_ ) # Let's go _snake_case : str = parser.parse_args() if not hasattr(lowerCAmelCase_ , '''func''' ): parser.print_help() exit(1 ) # Run _snake_case : Union[str, Any] = args.func(lowerCAmelCase_ ) service.run() if __name__ == "__main__": main()
47
1
"""Tests for the GPT-NeoX model family.

NOTE: in the mangled original all three classes were named `lowerCamelCase`
(so later definitions shadowed earlier ones), the mixin bases were mangled
to `a__`, `setUp` referenced the undefined `GPTNeoXModelTester`, and several
assignment targets (`config.is_decoder`, `config.add_cross_attention`,
`config.rope_scaling`) were reduced to throwaway locals.  All names and
attribute assignments are restored here.
"""
import unittest

from parameterized import parameterized

from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        GPTNeoXForCausalLM,
        GPTNeoXForQuestionAnswering,
        GPTNeoXForSequenceClassification,
        GPTNeoXForTokenClassification,
        GPTNeoXModel,
    )


class GPTNeoXModelTester:
    """Builds tiny GPT-NeoX configs/inputs and runs per-head-type checks."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        # Use the last token id as padding.
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": GPTNeoXModel,
            "question-answering": GPTNeoXForQuestionAnswering,
            "text-classification": GPTNeoXForSequenceClassification,
            "text-generation": GPTNeoXForCausalLM,
            "token-classification": GPTNeoXForTokenClassification,
            "zero-shot": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_model_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_model_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))


@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_gptneox(self):
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")

            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)

            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"

            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]

            self.assertEqual(output_str, expected_output)
47
'''simple docstring''' from collections.abc import Generator def _a ( ): """simple docstring""" _snake_case , _snake_case : Union[str, Any] = 0, 1 while True: _snake_case , _snake_case : List[str] = b, a + b yield b def _a ( lowerCAmelCase_ = 1_000 ): """simple docstring""" _snake_case : List[str] = 1 _snake_case : Dict = fibonacci_generator() while len(str(next(lowerCAmelCase_ ) ) ) < n: answer += 1 return answer + 1 if __name__ == "__main__": print(solution(int(str(input()).strip())))
47
1
"""Minimal Accelerate example: fine-tune bert-base-cased on GLUE MRPC.

Works unchanged on single CPU/GPU, multi-GPU (torch distributed), TPU,
and fp16/bf16/fp8 mixed precision. See the accelerate examples README.
"""
import argparse

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType

# If the requested batch size exceeds this, we fall back to gradient accumulation
# (except on TPU, where static shapes make accumulation unnecessary/undesirable).
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: "Accelerator", batch_size: int = 16):
    """Build train/eval dataloaders for GLUE MRPC, tokenized with bert-base-cased.

    Args:
        accelerator: the Accelerator driving this run; used to serialize dataset
            preprocessing across processes and to choose padding strategy.
        batch_size: per-device training batch size.

    Returns:
        (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the tokenizer to every split, main process first so the cache is
    # written once and reused by the other processes.
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # Rename 'label' to 'labels', the name transformers models expect.
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16.
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )
    return train_dataloader, eval_dataloader


def training_function(config, args):
    """Run the full train/eval loop described by `config` under Accelerate.

    Args:
        config: dict with keys 'lr', 'num_epochs', 'seed', 'batch_size'.
        args: parsed CLI namespace with `cpu` and `mixed_precision`.
    """
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation.
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also controls
    # new weights initialization).
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True`
    # (default value), but it must come before optimizer creation for TPU to work.
    model = model.to(accelerator.device)

    optimizer = AdamW(params=model.parameters(), lr=lr)

    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything: unpack in the same order the objects were passed in.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    """Parse CLI flags and launch training with default MRPC hyper-parameters."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
47
"""Helper utilities for quantization-aware training with NVIDIA pytorch-quantization."""
import logging
import re

import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor

logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names


def add_arguments(parser):
    """Add quantization-specific CLI arguments to `parser`."""
    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
    group.add_argument(
        "--recalibrate-weights",
        action="store_true",
        help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ),
    )


def set_default_quantizers(args):
    """Install default input/weight quant descriptors on QuantLinear from CLI args.

    Raises:
        ValueError: if the calibrator name is unknown, or percentile calibration
            was requested without --percentile.
    """
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    # Per-tensor weight scaling when requested, otherwise per-output-channel (axis 0).
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)


def configure_model(model, args, calib=False, eval=False):
    """Enable/disable quantizers and apply post-calibration transforms per CLI args.

    Note: `calib=True` skips the enable/disable passes (they only apply after
    calibration). `eval` is kept for interface compatibility.
    """
    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)

    if args.clip_gelu:
        clip_gelu(model, args.clip_gelu)

    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model)


def enable_calibration(model):
    """Switch every TensorQuantizer in `model` into calibration mode."""
    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")


def finish_calibration(model, args):
    """Load calibrated amax values and switch quantizers back to quantization mode."""
    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model)


def fuse_qkv(model, args):
    """Share one amax across the Q/K/V input quantizers of every self-attention block.

    With --quant-per-tensor, the Q/K/V weight quantizers are fused as well.
    """

    def fuse3(qq, qk, qv):
        # All three quantizers must already carry an amax buffer (i.e. be calibrated).
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()

        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)


def clip_gelu(model, maxval):
    """Clamp the input-quantizer amax of every FFN output.dense layer to `maxval`."""
    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")


def expand_amax(model):
    """Expand per-tensor amax values into per-output-channel vectors (in place)."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")


def recalibrate_weights(model):
    """Recompute each weight quantizer's amax from the current weights (in place)."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod._weight_quantizer, "_amax"):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue

            # determine which axes to reduce across,
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax


def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Log one line per weighted layer: its input and weight quantizer configs.

    Args:
        model: the (quantized) model to inspect.
        name_width: unused initial value; recomputed from the longest layer name.
        line_width: wrap onto two log lines beyond this width.
        ignore: a type, name-substring, or list of either, to skip.
    """
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]

    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))

    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        s = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f'{" ":{name_width}} {wgt_str}')


def print_quant_summary(model):
    """Print a count (and repr) of every TensorQuantizer in `model`."""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")


def set_quantizer(name, mod, quantizer, k, v):
    """Set attribute `k` to `v` on `mod.<quantizer>` (e.g. `_input_quantizer`)."""
    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")


def set_quantizers(name, mod, which="both", **kwargs):
    """Apply `kwargs` as attribute assignments to `mod`'s input and/or weight quantizers."""
    s = f"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name, mod, "_input_quantizer", k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, "_weight_quantizer", k, v)
    logger.info(s)


def set_quantizer_by_name(model, names, **kwargs):
    """Apply `kwargs` to every quantizer whose owning module name matches any regex in `names`."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s)
47
1
"""Unit tests for transformers optimizers and learning-rate schedules."""
import os
import tempfile
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch

if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        Adafactor,
        AdamW,
        get_constant_schedule,
        get_constant_schedule_with_warmup,
        get_cosine_schedule_with_warmup,
        get_cosine_with_hard_restarts_schedule_with_warmup,
        get_inverse_sqrt_schedule,
        get_linear_schedule_with_warmup,
        get_polynomial_decay_schedule_with_warmup,
    )


def unwrap_schedule(scheduler, num_steps=10):
    """Step `scheduler` `num_steps` times and return the learning rates observed."""
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    """Like `unwrap_schedule`, but save/reload the scheduler state midway through.

    Exercises state_dict round-tripping; the returned lrs must match a plain run.
    """
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs


@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1_000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)


@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")


class LambdaScheduleWrapper:
    """Picklable callable wrapper for a schedule's lr lambdas (plain lambdas can't be pickled)."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        # Replace each lr lambda with a wrapped, picklable equivalent (in place).
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
47
"""In-place slowsort, a deliberately inefficient multiply-and-surrender sort."""
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort `sequence[start:end + 1]` in place using the slowsort algorithm.

    Args:
        sequence: mutable list to sort.
        start: first index of the range (defaults to 0).
        end: last index of the range, inclusive (defaults to len(sequence) - 1).

    >>> seq = [1, 6, 2, 5, 3, 4, 4, 5]; slowsort(seq); seq
    [1, 2, 3, 4, 4, 5, 5, 6]
    >>> seq = []; slowsort(seq); seq
    []
    """
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return

    mid = (start + end) // 2
    # Recursively sort both halves, ...
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    # ... move the larger of the two middle/end elements to the end, ...
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    # ... then sort everything except the (now correct) last element.
    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
47
1
"""Lazy-import entry point for the Nezha model: submodules are imported only
when one of their attributes is first accessed."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}

# The modeling module is only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only; at runtime the lazy module handles them.
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
47
"""Slow integration test for the Flax MT5 conditional-generation model."""
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow

if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
    from transformers.models.ta.modeling_flax_ta import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MTaIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        """Check that mt5-small reproduces a known cross-entropy score on a tiny example.

        Reference score comes from the original Mesh-TF implementation; it is the
        (negated) summed token cross-entropy for the target "Hi I am".
        """
        model = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
        # Mean loss -> total loss over the label length, negated to match the reference.
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
47
1
'''simple docstring''' from __future__ import annotations import unittest from transformers import LEDConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFLEDForConditionalGeneration, TFLEDModel @require_tf class lowerCamelCase : _lowercase : Any = LEDConfig _lowercase : Any = {} _lowercase : Optional[Any] = """gelu""" def __init__( self , lowercase__ , lowercase__=13 , lowercase__=7 , lowercase__=True , lowercase__=False , lowercase__=99 , lowercase__=32 , lowercase__=2 , lowercase__=4 , lowercase__=37 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=20 , lowercase__=2 , lowercase__=1 , lowercase__=0 , lowercase__=4 , ) -> Any: """simple docstring""" _snake_case : Dict = parent _snake_case : Any = batch_size _snake_case : List[str] = seq_length _snake_case : Union[str, Any] = is_training _snake_case : Tuple = use_labels _snake_case : int = vocab_size _snake_case : str = hidden_size _snake_case : Optional[Any] = num_hidden_layers _snake_case : List[Any] = num_attention_heads _snake_case : Optional[int] = intermediate_size _snake_case : List[Any] = hidden_dropout_prob _snake_case : List[str] = attention_probs_dropout_prob _snake_case : Optional[int] = max_position_embeddings _snake_case : Any = eos_token_id _snake_case : List[Any] = pad_token_id _snake_case : Optional[int] = bos_token_id _snake_case : Any = attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window` and one before and one 
after _snake_case : Any = self.attention_window + 2 # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for # the `test_attention_outputs` and `test_hidden_states_output` tests _snake_case : Tuple = ( self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window ) def UpperCAmelCase_ ( self ) -> Optional[int]: """simple docstring""" _snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _snake_case : Tuple = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _snake_case : Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1 ) _snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _snake_case : List[Any] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , ) _snake_case : Dict = prepare_led_inputs_dict(lowercase__ , lowercase__ , lowercase__ ) _snake_case : Dict = tf.concat( [tf.zeros_like(lowercase__ )[:, :-1], tf.ones_like(lowercase__ )[:, -1:]] , axis=-1 , ) _snake_case : Dict = global_attention_mask return config, inputs_dict def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> int: """simple docstring""" _snake_case : int = TFLEDModel(config=lowercase__ ).get_decoder() _snake_case : Union[str, Any] = inputs_dict['''input_ids'''] _snake_case : List[str] 
= input_ids[:1, :] _snake_case : Tuple = inputs_dict['''attention_mask'''][:1, :] _snake_case : Dict = 1 # first forward pass _snake_case : Optional[int] = model(lowercase__ , attention_mask=lowercase__ , use_cache=lowercase__ ) _snake_case , _snake_case : Dict = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _snake_case : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size ) _snake_case : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _snake_case : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 ) _snake_case : List[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) _snake_case : List[Any] = model(lowercase__ , attention_mask=lowercase__ )[0] _snake_case : Tuple = model(lowercase__ , attention_mask=lowercase__ , past_key_values=lowercase__ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _snake_case : Tuple = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _snake_case : int = output_from_no_past[:, -3:, random_slice_idx] _snake_case : Optional[Any] = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowercase__ , lowercase__ , rtol=1E-3 ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , ): """simple docstring""" if attention_mask is None: _snake_case : Union[str, Any] = tf.cast(tf.math.not_equal(lowerCAmelCase_ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: _snake_case : str = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: _snake_case : int = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _snake_case : List[str] = 
tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf class lowerCamelCase (a__ , a__ , unittest.TestCase ): _lowercase : Optional[int] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else () _lowercase : int = (TFLEDForConditionalGeneration,) if is_tf_available() else () _lowercase : Dict = ( { """conversational""": TFLEDForConditionalGeneration, """feature-extraction""": TFLEDModel, """summarization""": TFLEDForConditionalGeneration, """text2text-generation""": TFLEDForConditionalGeneration, """translation""": TFLEDForConditionalGeneration, } if is_tf_available() else {} ) _lowercase : int = True _lowercase : List[Any] = False _lowercase : str = False _lowercase : Union[str, Any] = False def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" _snake_case : str = TFLEDModelTester(self ) _snake_case : Union[str, Any] = ConfigTester(self , config_class=lowercase__ ) def UpperCAmelCase_ ( self ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase_ ( self ) -> List[str]: """simple docstring""" _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowercase__ ) def UpperCAmelCase_ ( self ) -> int: """simple docstring""" _snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() _snake_case : Any = tf.zeros_like(inputs_dict['''attention_mask'''] ) _snake_case : Optional[Any] = 2 _snake_case : Any = tf.where( tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , ) _snake_case : Dict = True _snake_case : str = self.model_tester.seq_length _snake_case : Dict = 
self.model_tester.encoder_seq_length def check_decoder_attentions_output(lowercase__ ): _snake_case : Optional[int] = outputs.decoder_attentions self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) def check_encoder_attentions_output(lowercase__ ): _snake_case : int = [t.numpy() for t in outputs.encoder_attentions] _snake_case : Tuple = [t.numpy() for t in outputs.encoder_global_attentions] self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers ) self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) self.assertListEqual( list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , ) for model_class in self.all_model_classes: _snake_case : Union[str, Any] = True _snake_case : Dict = False _snake_case : Union[str, Any] = False _snake_case : List[Any] = model_class(lowercase__ ) _snake_case : Optional[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) ) _snake_case : List[Any] = len(lowercase__ ) self.assertEqual(config.output_hidden_states , lowercase__ ) check_encoder_attentions_output(lowercase__ ) if self.is_encoder_decoder: _snake_case : Union[str, Any] = model_class(lowercase__ ) _snake_case : List[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) ) self.assertEqual(config.output_hidden_states , lowercase__ ) check_decoder_attentions_output(lowercase__ ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] _snake_case : str = True _snake_case : Tuple = model_class(lowercase__ ) _snake_case : int = model(self._prepare_for_class(lowercase__ , lowercase__ ) ) self.assertEqual(config.output_hidden_states , 
lowercase__ ) check_encoder_attentions_output(lowercase__ ) # Check attention is always last and order is fine _snake_case : int = True _snake_case : List[str] = True _snake_case : Tuple = model_class(lowercase__ ) _snake_case : Optional[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase__ ) ) self.assertEqual(model.config.output_hidden_states , lowercase__ ) check_encoder_attentions_output(lowercase__ ) @unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' ) def UpperCAmelCase_ ( self ) -> int: """simple docstring""" pass def UpperCAmelCase_ ( self ) -> str: """simple docstring""" pass def _a ( lowerCAmelCase_ ): """simple docstring""" return tf.constant(lowerCAmelCase_ , dtype=tf.intaa ) UpperCAmelCase : Dict = 1E-4 @slow @require_tf class lowerCamelCase (unittest.TestCase ): def UpperCAmelCase_ ( self ) -> Dict: """simple docstring""" _snake_case : List[str] = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led # change to intended input here _snake_case : List[str] = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _snake_case : Tuple = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _snake_case : Tuple = prepare_led_inputs_dict(model.config , lowercase__ , lowercase__ ) _snake_case : int = model(**lowercase__ )[0] _snake_case : Dict = (1, 1_024, 768) self.assertEqual(output.shape , lowercase__ ) # change to expected output here _snake_case : List[Any] = tf.convert_to_tensor( [[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , ) tf.debugging.assert_near(output[:, :3, :3] , lowercase__ , atol=1E-3 ) def UpperCAmelCase_ ( self ) -> List[Any]: """simple docstring""" _snake_case : Any = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ) # change to intended input here _snake_case : Dict 
= _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _snake_case : Dict = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _snake_case : List[str] = prepare_led_inputs_dict(model.config , lowercase__ , lowercase__ ) _snake_case : Tuple = model(**lowercase__ )[0] _snake_case : Any = (1, 1_024, model.config.vocab_size) self.assertEqual(output.shape , lowercase__ ) # change to expected output here _snake_case : Dict = tf.convert_to_tensor( [[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , ) tf.debugging.assert_near(output[:, :3, :3] , lowercase__ , atol=1E-3 , rtol=1E-3 )
47
'''simple docstring'''
import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class lowerCamelCase(unittest.TestCase):
    """CPU-only regression test: an optimizer wrapped by ``Accelerator.prepare``
    must survive a pickle round-trip."""

    def UpperCAmelCase_(self) -> None:
        # Fix: the original annotated this method ``-> Union[str, Any]`` without
        # importing ``Union``, which raises NameError when the class is defined.
        # Minimal model/optimizer pair; the values are irrelevant, only
        # picklability of the prepared optimizer matters.
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(F'''Accelerated optimizer pickling failed with {e}''')
        # Reset global accelerator state so other tests start clean.
        AcceleratorState._reset_state()
47
1
'''simple docstring'''
from argparse import ArgumentParser

from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands


def main():
    """Entry point for the ``transformers-cli`` tool.

    Registers every sub-command on an ``ArgumentParser``, parses ``sys.argv``,
    and dispatches to the selected command's ``run()``.
    Fix: the original defined this function as ``_a`` but the ``__main__``
    guard called ``main()``, raising NameError.
    """
    parser = ArgumentParser('''Transformers CLI tool''', usage='''transformers-cli <command> [<args>]''')
    commands_parser = parser.add_subparsers(help='''transformers-cli command helpers''')

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()
    if not hasattr(args, '''func'''):
        # No sub-command selected: show usage and exit non-zero.
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


# Backward-compatible alias for the previous (mangled) function name.
_a = main


if __name__ == "__main__":
    main()
47
'''simple docstring''' UpperCAmelCase : Union[str, Any] = tuple[float, float, float] UpperCAmelCase : int = tuple[float, float, float] def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : str = end_pointa[0] - end_pointa[0] _snake_case : Tuple = end_pointa[1] - end_pointa[1] _snake_case : Any = end_pointa[2] - end_pointa[2] return (x, y, z) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Dict = ab[1] * ac[2] - ab[2] * ac[1] # *i _snake_case : List[str] = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j _snake_case : Optional[int] = ab[0] * ac[1] - ab[1] * ac[0] # *k return (x, y, z) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" return tuple(round(lowerCAmelCase_ , lowerCAmelCase_ ) for x in vector ) == (0, 0, 0) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 10 ): """simple docstring""" _snake_case : str = create_vector(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Tuple = create_vector(lowerCAmelCase_ , lowerCAmelCase_ ) return is_zero_vector(get_ad_vectors_cross(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ )
47
1
'''simple docstring'''
import argparse
import os

import transformers

from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging


logging.set_verbosity_info()

# Fix: the original bound these to ``UpperCAmelCase`` (with an unimported
# ``Tuple``/``Union`` annotation, a NameError) while the code below reads
# ``logger`` and ``TOKENIZER_CLASSES``.
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}


def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    """Convert slow-tokenizer checkpoints to the fast (tokenizers-backed) format.

    Args:
        tokenizer_name: name of one fast tokenizer class, or None for all of them.
        checkpoint_name: a single checkpoint to convert, or None for every
            canonical checkpoint of each tokenizer class.
        dump_path: output directory for the generated ``tokenizer.json`` files.
        force_download: re-download checkpoints even if cached.

    Raises:
        ValueError: if ``tokenizer_name`` is not a known fast tokenizer.

    Fix: the original declared four parameters with the same name (a
    SyntaxError) and was defined as ``_a`` while ``__main__`` called
    ``convert_slow_checkpoint_to_fast``.
    """
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f'''Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.''')

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + '''Fast''')}

    logger.info(f'''Loading tokenizer classes: {tokenizer_names}''')

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            # Convert every canonical checkpoint known to this tokenizer class.
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f'''For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}''')

        for checkpoint in checkpoint_names:
            logger.info(f'''Loading {tokenizer_class.__class__.__name__} {checkpoint}''')

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f'''Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}''')

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split('''/''')
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''')

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    # Vocab file lives in a sub-directory named after the checkpoint.
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''')

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f'''=> File names {file_names}''')

            # Keep only the fast-format tokenizer.json artifacts.
            for file_name in file_names:
                if not file_name.endswith('''tokenizer.json'''):
                    os.remove(file_name)
                    logger.info(f'''=> removing {file_name}''')


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--dump_path', default=None, type=str, required=True, help='Path to output generated fast tokenizer files.'
    )
    parser.add_argument(
        '--tokenizer_name',
        default=None,
        type=str,
        help=(
            F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
            'download and convert all the checkpoints from AWS.'
        ),
    )
    parser.add_argument(
        '--checkpoint_name',
        default=None,
        type=str,
        help='Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.',
    )
    parser.add_argument(
        '--force_download',
        action='store_true',
        help='Re-download checkpoints.',
    )
    args = parser.parse_args()

    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
47
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime

import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm

# Fix: the original imported the nonexistent name ``GPTaLMHeadModel``
# (ImportError); the transformers class is ``GPT2LMHeadModel``.
from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)


def save_model(model, dirpath):
    """Save ``model`` into ``dirpath``, removing any stale config/weights first."""
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, '''config.json''')) and os.path.isfile(
            os.path.join(dirpath, '''config.json''')
        ):
            os.remove(os.path.join(dirpath, '''config.json'''))
        if os.path.exists(os.path.join(dirpath, '''pytorch_model.bin''')) and os.path.isfile(
            os.path.join(dirpath, '''pytorch_model.bin''')
        ):
            os.remove(os.path.join(dirpath, '''pytorch_model.bin'''))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)


def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution along the last dim.

    If ``unlogit`` is True, ``p`` is first squared (turns attention scores
    into an un-normalized distribution before taking the entropy).
    """
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    # 0 * log(0) is defined as 0 for entropy purposes.
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)


def print_ad_tensor(tensor):
    """Log a 2D (layers x heads) tensor, one row per layer."""
    logger.info('''lv, h >\t''' + '''\t'''.join(f'''{x + 1}''' for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:.5f}''' for x in tensor[row].cpu().data))
        else:
            logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:d}''' for x in tensor[row].cpu().data))


def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Run the model on ``eval_dataloader`` and accumulate per-head statistics.

    Returns ``(attn_entropy, head_importance, total_loss)``; importance is the
    accumulated absolute gradient of the loss w.r.t. the head mask
    (Michel et al., http://arxiv.org/abs/1905.10650).
    Fix: the original declared seven parameters all named ``lowerCAmelCase_``
    (a SyntaxError) and called undefined helper names.
    """
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc='''Iteration''', disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info('''Attention entropies''')
        print_ad_tensor(attn_entropy)
    if compute_importance:
        logger.info('''Head importance scores''')
        print_ad_tensor(head_importance)
    logger.info('''Head ranked by importance scores''')
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    # NOTE(review): the mangled original lost the assignment target here; this
    # is the standard rank computation from the upstream bertology script.
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_ad_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss


def mask_heads(args, model, eval_dataloader):
    """Iteratively zero the least-important heads until the LM score drops
    below ``masking_threshold`` of the original; returns the final head mask."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info('''Pruning: original score: %f, threshold: %f''', original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float('''Inf''')
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print('''BREAK BY num_to_mask''')
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info('''Heads to mask: %s''', str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            '''Masking: current score: %f, remaining heads %d (%.1f percents)''',
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info('''Final head mask''')
    print_ad_tensor(head_mask)
    np.save(os.path.join(args.output_dir, '''head_mask.npy'''), head_mask.detach().cpu().numpy())

    return head_mask


def prune_heads(args, model, eval_dataloader, head_mask):
    """Actually remove the heads zeroed in ``head_mask`` and compare score/speed
    before and after pruning."""
    # Try pruning and test time speedup
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            # A single pruned head comes back as a bare int; normalize to list.
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    new_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - new_time

    logger.info(
        '''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''',
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info('''Pruning: score with masking: %f score with pruning: %f''', score_masking, score_pruning)
    logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''', original_time / new_time * 100)
    save_model(model, args.output_dir)


def main():
    """Parse CLI args, load GPT-2, and run head entropy/importance analysis,
    optionally followed by head masking and pruning."""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--data_dir''',
        default=None,
        type=str,
        required=True,
        help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''',
    )
    parser.add_argument(
        '''--model_name_or_path''',
        default=None,
        type=str,
        required=True,
        help='''Path to pretrained model or model identifier from huggingface.co/models''',
    )
    parser.add_argument(
        '''--output_dir''',
        default=None,
        type=str,
        required=True,
        help='''The output directory where the model predictions and checkpoints will be written.''',
    )
    # Other parameters
    parser.add_argument(
        '''--config_name''',
        default='''''',
        type=str,
        help='''Pretrained config name or path if not the same as model_name_or_path''',
    )
    parser.add_argument(
        '''--tokenizer_name''',
        default='''''',
        type=str,
        help='''Pretrained tokenizer name or path if not the same as model_name_or_path''',
    )
    parser.add_argument(
        '''--cache_dir''',
        default=None,
        type=str,
        help='''Where do you want to store the pre-trained models downloaded from s3''',
    )
    parser.add_argument(
        '''--data_subset''', type=int, default=-1, help='''If > 0: limit the data to a subset of data_subset instances.'''
    )
    parser.add_argument(
        '''--overwrite_output_dir''', action='''store_true''', help='''Whether to overwrite data in output directory'''
    )
    parser.add_argument(
        '''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets'''
    )
    parser.add_argument(
        '''--dont_normalize_importance_by_layer''', action='''store_true''', help='''Don\'t normalize importance score by layers'''
    )
    parser.add_argument(
        '''--dont_normalize_global_importance''',
        action='''store_true''',
        help='''Don\'t normalize all importance scores between 0 and 1''',
    )
    parser.add_argument(
        '''--try_masking''', action='''store_true''', help='''Whether to try to mask head until a threshold of accuracy.'''
    )
    parser.add_argument(
        '''--masking_threshold''',
        default=0.9,
        type=float,
        help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''',
    )
    parser.add_argument(
        '''--masking_amount''', default=0.1, type=float, help='''Amount to heads to masking at each masking step.'''
    )
    parser.add_argument('''--metric_name''', default='''acc''', type=str, help='''Metric to use for head masking.''')
    parser.add_argument(
        '''--max_seq_length''',
        default=128,
        type=int,
        help=(
            '''The maximum total input sequence length after WordPiece tokenization. \n'''
            '''Sequences longer than this will be truncated, sequences shorter padded.'''
        ),
    )
    parser.add_argument('''--batch_size''', default=1, type=int, help='''Batch size.''')
    parser.add_argument('''--seed''', type=int, default=42)
    parser.add_argument('''--local_rank''', type=int, default=-1, help='''local_rank for distributed training on gpus''')
    parser.add_argument('''--no_cuda''', action='''store_true''', help='''Whether not to use CUDA when available''')
    parser.add_argument('''--server_ip''', type=str, default='''''', help='''Can be used for distant debugging.''')
    parser.add_argument('''--server_port''', type=str, default='''''', help='''Can be used for distant debugging.''')
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print('''Waiting for debugger attach''')
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''')
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device('''cuda''', args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend='''nccl''')  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, '''run_args.bin'''))
    logger.info('''Training/evaluation parameters %s''', args)

    # Prepare dataset
    # Fix: the original used the nonexistent dtype ``np.intaa``.
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
47
1
'''simple docstring'''


def combination_sum_iv(n, array, target):
    """Count ordered combinations of ``array`` elements summing to ``target``.

    Naive exponential recursion; ``n`` (== len(array)) is kept for interface
    parity with the other variants. Fix: the original declared three
    parameters with the same name (a SyntaxError) and all three functions
    shared the name ``_a`` while ``__main__`` called ``combination_sum_iv``.
    """

    def count_of_possible_combinations(target) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n, array, target):
    """Same count, memoized top-down with a dp array indexed by remaining target."""

    def count_of_possible_combinations_with_dp_array(target, dp_array) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array) for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)  # -1 marks "not computed yet"
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n, array, target):
    """Same count, iterative bottom-up DP in O(n * target)."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1  # one way to reach sum 0: pick nothing
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
47
'''simple docstring'''


def fibonacci(n):
    """Return the n-th Fibonacci number (F(1)=0 by this module's convention,
    F(2)=1, then the usual recurrence); 0 for non-int input.

    Fix: the original tested ``isinstance(n, n)`` (TypeError) instead of
    ``isinstance(n, int)``.
    """
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n):
    """Return the index of the first Fibonacci number with ``n`` digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n=1_000):
    """Project Euler 25: index of the first ``n``-digit Fibonacci number.

    Fix: the original defined every function as ``_a``, so the internal
    calls to ``fibonacci``/``fibonacci_digits_index`` and the ``__main__``
    call to ``solution`` all raised NameError.
    """
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
47
1
'''simple docstring'''
from collections.abc import Generator


def fibonacci_generator() -> Generator:
    """Yield Fibonacci numbers starting from 1 (1, 2, 3, 5, 8, ...)."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n=1_000):
    """Project Euler 25: index of the first Fibonacci number with ``n`` digits.

    Fix: the original defined the generator as ``_a`` but called
    ``fibonacci_generator()`` here, raising NameError.
    """
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    # +1 accounts for the terminating number that satisfied the condition.
    return answer + 1


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
47
"""A generic least-recently-used (LRU) cache.

Backed by a doubly linked list (recency order) plus a hash map (O(1)
lookup), with a class-level decorator for memoizing single-argument
functions.
"""
from __future__ import annotations

from collections.abc import Callable
from typing import Generic, TypeVar

T = TypeVar("T")
U = TypeVar("U")


class DoubleLinkedListNode(Generic[T, U]):
    """One node of the doubly linked list: a key/value pair plus links."""

    def __init__(self, key: T | None, val: U | None) -> None:
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


class DoubleLinkedList(Generic[T, U]):
    """Doubly linked list with sentinel head/rear nodes.

    The most recently used entries sit next to ``rear``; the least
    recently used entry sits next to ``head``.
    """

    def __init__(self) -> None:
        # Sentinel nodes carry no payload; real nodes live between them.
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        """Insert ``node`` just before the rear sentinel (most recent slot)."""
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(
        self, node: DoubleLinkedListNode[T, U]
    ) -> DoubleLinkedListNode[T, U] | None:
        """Unlink ``node`` and return it, or None if it is not linked in."""
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    """LRU cache with O(1) ``get``/``put``; also usable as a decorator."""

    # Shared map from decorated function -> its cache instance.
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int) -> None:
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        """Return the value for ``key`` (marking it most recent), else None."""
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        """Insert or refresh ``key``; evict the least recent entry at capacity."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # (explain to type checker via assertions)
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(
        cls, size: int = 128
    ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        """Decorator factory memoizing ``func`` with an LRU cache of ``size``."""

        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner


if __name__ == "__main__":
    import doctest

    doctest.testmod()
47
1
"""Introsort: quicksort with a heap-sort fallback and insertion-sort finish.

Restores the real function names — every definition had been renamed to
``_a`` while the bodies (and the __main__ block) still call the original
names, so the module raised NameError on any use.
"""
import math


def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    """Insertion-sort ``array[start:end]`` in place and return the array."""
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        # Shift larger elements right until the slot for the value is found.
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    """Sift ``array[index]`` down so the subtree rooted there is a max-heap."""
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    """In-place heap sort; the fallback when quicksort recurses too deep."""
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)
    return array


def median_of_a(array: list, first_index: int, middle_index: int, last_index: int):
    """Return the median of the three candidate pivot values."""
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot) -> int:
    """Hoare-style partition of ``array[low:high]`` around ``pivot``.

    Returns the split index; elements below it are <= pivot region.
    """
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    """Introsort entry point: returns ``array`` sorted in place."""
    if len(array) == 0:
        return array
    # Depth limit 2*ceil(log2(n)) before switching to heap sort
    # (the original called non-existent math.loga).
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(
    array: list, start: int, end: int, size_threshold: int, max_depth: int
) -> list:
    """Recursive introsort over ``array[start:end]``."""
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_a(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, start, p, size_threshold, max_depth)
        start = p
    # Small ranges are finished with insertion sort.
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input('Enter numbers separated by a comma : ').strip()
    unsorted = [float(item) for item in user_input.split(',')]
    print(sort(unsorted))
47
"""Deduplicate identical initializer tensors in an ONNX model file.

Restores the helper names — all functions had been renamed to ``_a``
while the bodies still call ``_is_equal_tensor_proto`` etc., so the
module raised NameError on use.
"""
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    """Compare two TensorProtos for equality, ignoring their names."""
    name_a = a.name
    name_b = b.name
    # Blank the names so only the tensor payload/metadata is compared,
    # then restore them.
    a.name = ''
    b.name = ''
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    """Replace input ``name`` with ``new_name`` on one node, recursing
    into If/Loop subgraphs."""
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            # repeated-field API: insert the new name, drop the old one
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    """Apply the input rename to every node of ``graph_proto``."""
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    """Drop duplicate initializers and rewire references to the kept copy.

    ``ind_to_replace`` holds (duplicate_index, reference_index) pairs with
    duplicate_index > reference_index.
    """
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """Deduplicate identical initializers in the model at ``onnx_file_path``.

    Saves ``optimized_<name>`` next to the input file and returns the new
    path. Prints the estimated memory saved.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    # O(n^2) pairwise comparison of initializers.
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                # Element sizes per ONNX data type: 1=float32, 6=int32,
                # 7=int64, 11=double.
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print('unexpected data type: ', dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print('total reduced size: ', total_reduced_size / 1_024 / 1_024 / 1_024, 'GB')
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    optimized_model_file_name = 'optimized_' + model_file_name
    new_model_path = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model_path)
    return new_model_path
47
1
"""Capitalize the first letter of a sentence (ASCII only)."""
from string import ascii_lowercase, ascii_uppercase


def capitalize(sentence: str) -> str:
    """Return ``sentence`` with its first character upper-cased.

    Bug fix: the lookup table must map lowercase -> uppercase letters;
    the previous code zipped the sentence with itself (leaving the
    imported ``ascii_lowercase``/``ascii_uppercase`` unused), which made
    the function a no-op.
    """
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]


# Backward-compatible alias for the previous (obfuscated) public name.
_a = capitalize


if __name__ == "__main__":
    from doctest import testmod

    testmod()
47
"""Lazy import shim for the Pegasus-X model.

Heavy torch-backed submodules are only loaded on first attribute access
via ``_LazyModule``. Bug fix: the import-structure dict had been
assigned to a throwaway name while ``_LazyModule`` still received the
undefined ``_import_structure``.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Base import structure: submodule -> names it exports (config only).
_import_structure = {
    'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # torch not installed: skip registering the modeling classes
else:
    _import_structure['modeling_pegasus_x'] = [
        'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PegasusXForConditionalGeneration',
        'PegasusXModel',
        'PegasusXPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so deps load on demand.
    sys.modules[__name__] = _LazyModule(
        __name__, globals()['__file__'], _import_structure, module_spec=__spec__
    )
47
1
"""Configuration class for GLPN (Global-Local Path Networks, monocular
depth estimation).

Bug fix: the class inherited the undefined name ``a__`` while
``PretrainedConfig`` was imported and unused — it is the intended base.
"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging

UpperCAmelCase : Tuple = logging.get_logger(__name__)

# Released GLPN checkpoints -> hosted config files.
UpperCAmelCase : int = {
    'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}


class GLPNConfig(PretrainedConfig):
    """Stores the hyper-parameters of a GLPN encoder/decoder.

    NOTE(review): parameter names restored from the obfuscated positional
    signature to match the released GLPN configuration — verify against
    callers that pass keywords.
    """

    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
47
"""Image processor for GLPN.

Resizes height/width down to the nearest multiple of ``size_divisor``
and rescales pixel values to [0, 1]. Bug fix: the class inherited the
undefined name ``a__`` while ``BaseImageProcessor`` was imported and
unused; method names are restored to the ones ``preprocess`` actually
calls (``self.resize`` / ``self.rescale``).
"""
from typing import List, Optional, Union

import numpy as np
import PIL.Image

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    PILImageResampling,
    get_image_size,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging

UpperCAmelCase : Dict = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    """GLPN image preprocessing (resize to size_divisor multiple + rescale)."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self, image, size_divisor, resample, data_format=None, **kwargs
    ) -> np.ndarray:
        """Resize so height and width are multiples of ``size_divisor``."""
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(
            image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs
        )
        return image

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size_divisor=None,
        resample=None,
        do_rescale=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Preprocess one image or a batch into model-ready pixel values."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError('size_divisor is required for resizing')
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError('Invalid image(s)')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]
        if do_resize:
            images = [
                self.resize(image, size_divisor=size_divisor, resample=resample)
                for image in images
            ]
        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
47
1
"""Legacy GLUE/XNLI metric helpers (deprecated in favour of 🤗 Evaluate).

Bug fixes: all functions had been renamed to ``_a`` while bodies still
call ``simple_accuracy``/``acc_and_fa``/``pearson_and_spearman``
(NameError on use), and the sklearn import used the non-existent
``fa_score`` instead of ``f1_score``.
"""
import warnings

from ...utils import is_sklearn_available, requires_backends

if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef

DEPRECATION_WARNING = (
    'This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '
    'library. You can have a look at this example script for pointers: '
    'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'
)


def simple_accuracy(preds, labels):
    """Fraction of predictions equal to labels."""
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, 'sklearn')
    return (preds == labels).mean()


def acc_and_fa(preds, labels):
    """Accuracy, F1 and their mean (MRPC/QQP style)."""
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_fa, 'sklearn')
    acc = simple_accuracy(preds, labels)
    fa = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": fa,
        "acc_and_f1": (acc + fa) / 2,
    }


def pearson_and_spearman(preds, labels):
    """Pearson/Spearman correlations and their mean (STS-B style)."""
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, 'sklearn')
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    """Dispatch to the metric appropriate for a GLUE ``task_name``."""
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, 'sklearn')
    assert len(preds) == len(
        labels
    ), f'''Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}'''
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_fa(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_fa(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    """Accuracy for the XNLI task."""
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, 'sklearn')
    if len(preds) != len(labels):
        raise ValueError(
            f'''Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}'''
        )
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
47
'''simple docstring''' from __future__ import annotations import unittest from transformers import LEDConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFLEDForConditionalGeneration, TFLEDModel @require_tf class lowerCamelCase : _lowercase : Any = LEDConfig _lowercase : Any = {} _lowercase : Optional[Any] = """gelu""" def __init__( self , lowercase__ , lowercase__=13 , lowercase__=7 , lowercase__=True , lowercase__=False , lowercase__=99 , lowercase__=32 , lowercase__=2 , lowercase__=4 , lowercase__=37 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=20 , lowercase__=2 , lowercase__=1 , lowercase__=0 , lowercase__=4 , ) -> Any: """simple docstring""" _snake_case : Dict = parent _snake_case : Any = batch_size _snake_case : List[str] = seq_length _snake_case : Union[str, Any] = is_training _snake_case : Tuple = use_labels _snake_case : int = vocab_size _snake_case : str = hidden_size _snake_case : Optional[Any] = num_hidden_layers _snake_case : List[Any] = num_attention_heads _snake_case : Optional[int] = intermediate_size _snake_case : List[Any] = hidden_dropout_prob _snake_case : List[str] = attention_probs_dropout_prob _snake_case : Optional[int] = max_position_embeddings _snake_case : Any = eos_token_id _snake_case : List[Any] = pad_token_id _snake_case : Optional[int] = bos_token_id _snake_case : Any = attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window` and one before and one 
after _snake_case : Any = self.attention_window + 2 # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for # the `test_attention_outputs` and `test_hidden_states_output` tests _snake_case : Tuple = ( self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window ) def UpperCAmelCase_ ( self ) -> Optional[int]: """simple docstring""" _snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _snake_case : Tuple = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _snake_case : Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1 ) _snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _snake_case : List[Any] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , ) _snake_case : Dict = prepare_led_inputs_dict(lowercase__ , lowercase__ , lowercase__ ) _snake_case : Dict = tf.concat( [tf.zeros_like(lowercase__ )[:, :-1], tf.ones_like(lowercase__ )[:, -1:]] , axis=-1 , ) _snake_case : Dict = global_attention_mask return config, inputs_dict def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> int: """simple docstring""" _snake_case : int = TFLEDModel(config=lowercase__ ).get_decoder() _snake_case : Union[str, Any] = inputs_dict['''input_ids'''] _snake_case : List[str] 
= input_ids[:1, :] _snake_case : Tuple = inputs_dict['''attention_mask'''][:1, :] _snake_case : Dict = 1 # first forward pass _snake_case : Optional[int] = model(lowercase__ , attention_mask=lowercase__ , use_cache=lowercase__ ) _snake_case , _snake_case : Dict = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _snake_case : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size ) _snake_case : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _snake_case : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 ) _snake_case : List[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) _snake_case : List[Any] = model(lowercase__ , attention_mask=lowercase__ )[0] _snake_case : Tuple = model(lowercase__ , attention_mask=lowercase__ , past_key_values=lowercase__ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _snake_case : Tuple = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _snake_case : int = output_from_no_past[:, -3:, random_slice_idx] _snake_case : Optional[Any] = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowercase__ , lowercase__ , rtol=1E-3 ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , ): """simple docstring""" if attention_mask is None: _snake_case : Union[str, Any] = tf.cast(tf.math.not_equal(lowerCAmelCase_ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: _snake_case : str = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: _snake_case : int = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _snake_case : List[str] = 
tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf class lowerCamelCase (a__ , a__ , unittest.TestCase ): _lowercase : Optional[int] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else () _lowercase : int = (TFLEDForConditionalGeneration,) if is_tf_available() else () _lowercase : Dict = ( { """conversational""": TFLEDForConditionalGeneration, """feature-extraction""": TFLEDModel, """summarization""": TFLEDForConditionalGeneration, """text2text-generation""": TFLEDForConditionalGeneration, """translation""": TFLEDForConditionalGeneration, } if is_tf_available() else {} ) _lowercase : int = True _lowercase : List[Any] = False _lowercase : str = False _lowercase : Union[str, Any] = False def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" _snake_case : str = TFLEDModelTester(self ) _snake_case : Union[str, Any] = ConfigTester(self , config_class=lowercase__ ) def UpperCAmelCase_ ( self ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase_ ( self ) -> List[str]: """simple docstring""" _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowercase__ ) def UpperCAmelCase_ ( self ) -> int: """simple docstring""" _snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() _snake_case : Any = tf.zeros_like(inputs_dict['''attention_mask'''] ) _snake_case : Optional[Any] = 2 _snake_case : Any = tf.where( tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , ) _snake_case : Dict = True _snake_case : str = self.model_tester.seq_length _snake_case : Dict = 
self.model_tester.encoder_seq_length def check_decoder_attentions_output(lowercase__ ): _snake_case : Optional[int] = outputs.decoder_attentions self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) def check_encoder_attentions_output(lowercase__ ): _snake_case : int = [t.numpy() for t in outputs.encoder_attentions] _snake_case : Tuple = [t.numpy() for t in outputs.encoder_global_attentions] self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers ) self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) self.assertListEqual( list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , ) for model_class in self.all_model_classes: _snake_case : Union[str, Any] = True _snake_case : Dict = False _snake_case : Union[str, Any] = False _snake_case : List[Any] = model_class(lowercase__ ) _snake_case : Optional[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) ) _snake_case : List[Any] = len(lowercase__ ) self.assertEqual(config.output_hidden_states , lowercase__ ) check_encoder_attentions_output(lowercase__ ) if self.is_encoder_decoder: _snake_case : Union[str, Any] = model_class(lowercase__ ) _snake_case : List[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) ) self.assertEqual(config.output_hidden_states , lowercase__ ) check_decoder_attentions_output(lowercase__ ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] _snake_case : str = True _snake_case : Tuple = model_class(lowercase__ ) _snake_case : int = model(self._prepare_for_class(lowercase__ , lowercase__ ) ) self.assertEqual(config.output_hidden_states , 
lowercase__ ) check_encoder_attentions_output(lowercase__ ) # Check attention is always last and order is fine _snake_case : int = True _snake_case : List[str] = True _snake_case : Tuple = model_class(lowercase__ ) _snake_case : Optional[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase__ ) ) self.assertEqual(model.config.output_hidden_states , lowercase__ ) check_encoder_attentions_output(lowercase__ ) @unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' ) def UpperCAmelCase_ ( self ) -> int: """simple docstring""" pass def UpperCAmelCase_ ( self ) -> str: """simple docstring""" pass def _a ( lowerCAmelCase_ ): """simple docstring""" return tf.constant(lowerCAmelCase_ , dtype=tf.intaa ) UpperCAmelCase : Dict = 1E-4 @slow @require_tf class lowerCamelCase (unittest.TestCase ): def UpperCAmelCase_ ( self ) -> Dict: """simple docstring""" _snake_case : List[str] = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led # change to intended input here _snake_case : List[str] = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _snake_case : Tuple = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _snake_case : Tuple = prepare_led_inputs_dict(model.config , lowercase__ , lowercase__ ) _snake_case : int = model(**lowercase__ )[0] _snake_case : Dict = (1, 1_024, 768) self.assertEqual(output.shape , lowercase__ ) # change to expected output here _snake_case : List[Any] = tf.convert_to_tensor( [[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , ) tf.debugging.assert_near(output[:, :3, :3] , lowercase__ , atol=1E-3 ) def UpperCAmelCase_ ( self ) -> List[Any]: """simple docstring""" _snake_case : Any = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ) # change to intended input here _snake_case : Dict 
= _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _snake_case : Dict = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _snake_case : List[str] = prepare_led_inputs_dict(model.config , lowercase__ , lowercase__ ) _snake_case : Tuple = model(**lowercase__ )[0] _snake_case : Any = (1, 1_024, model.config.vocab_size) self.assertEqual(output.shape , lowercase__ ) # change to expected output here _snake_case : Dict = tf.convert_to_tensor( [[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , ) tf.debugging.assert_near(output[:, :3, :3] , lowercase__ , atol=1E-3 , rtol=1E-3 )
47
1
"""Tests for the offline-simulation harness of the `datasets` library.

Bug fixes: two ``pytest.raises`` calls referenced the undefined name
``lowerCAmelCase_`` — the imported-but-unused
``RequestWouldHangIndefinitelyError`` and the builtin ``ConnectionError``
are the intended exception types — and the ``_a`` function names were
not collectable by pytest.
"""
import pytest
import requests

from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def test_offline_with_timeout():
    """In CONNECTION_TIMES_OUT mode requests hang (harness error) or time out."""
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request('GET', 'https://huggingface.co')
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request('GET', 'https://huggingface.co', timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    """In CONNECTION_FAILS mode requests fail fast with ConnectionError."""
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request('GET', 'https://huggingface.co')


def test_offline_with_datasets_offline_mode_enabled():
    """HF_DATASETS_OFFLINE=1 makes http_head raise a ConnectionError subclass."""
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head('https://huggingface.co')
47
"""Fast GPT-NeoX tokenizer (byte-level BPE backed by `tokenizers`).

Bug fixes: the module constants were assigned to a throwaway name while
the class attributes still reference ``VOCAB_FILES_NAMES`` etc., and the
class inherited the undefined ``a__`` while ``PreTrainedTokenizerFast``
was imported and unused. The recomputed pre-tokenizer is now actually
installed on the backend tokenizer instead of being discarded.
"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging

if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'tokenizer_file': {
        'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'gpt-neox-20b': 2_048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    """Byte-level BPE tokenizer for GPT-NeoX-20B."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        # Rebuild the backend pre-tokenizer if add_prefix_space differs
        # from the serialized state.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the tokenizer model files into ``save_directory``."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Encode a conversation, keeping at most model_max_length ids."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
47
1
'''simple docstring''' def _a ( lowerCAmelCase_ ): """simple docstring""" if len(lowerCAmelCase_ ) <= 1: return lst _snake_case : Union[str, Any] = 1 while i < len(lowerCAmelCase_ ): if lst[i - 1] <= lst[i]: i += 1 else: _snake_case , _snake_case : List[str] = lst[i], lst[i - 1] i -= 1 if i == 0: _snake_case : Optional[int] = 1 return lst if __name__ == "__main__": UpperCAmelCase : str = input('Enter numbers separated by a comma:\n').strip() UpperCAmelCase : Union[str, Any] = [int(item) for item in user_input.split(',')] print(gnome_sort(unsorted))
47
"""Gamma function computed by numerical integration of its defining integral."""
import math

from numpy import inf
from scipy.integrate import quad


def gamma(num):
    """Return Gamma(num) = integral_0^inf x**(num-1) * e**(-x) dx.

    :param num: positive real argument
    :return: float approximation of Gamma(num)
    :raises ValueError: if num <= 0 (outside the integral's domain)

    Fix: the previous version gave both helper functions the same mangled
    name (the second definition shadowed the first) and the quadrature call
    referenced an undefined name instead of the integrand.
    """
    if num <= 0:
        raise ValueError('''math domain error''')
    # args must be a tuple; (num,) forwards num as the integrand's `z`.
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x, z):
    """Integrand of the Gamma function: x**(z-1) * e**(-x)."""
    return math.pow(x, z - 1) * math.exp(-x)


# Backward-compatible alias: after the duplicate definitions, the mangled
# name `_a` ended up bound to the integrand (the last def), so keep that.
_a = integrand


if __name__ == "__main__":
    from doctest import testmod

    testmod()
47
1
'''simple docstring''' import os from distutils.util import strtobool def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" for e in env_keys: _snake_case : List[str] = int(os.environ.get(lowerCAmelCase_ , -1 ) ) if val >= 0: return val return default def _a ( lowerCAmelCase_ , lowerCAmelCase_=False ): """simple docstring""" _snake_case : List[Any] = os.environ.get(lowerCAmelCase_ , str(lowerCAmelCase_ ) ) return strtobool(lowerCAmelCase_ ) == 1 # As its name indicates `strtobool` actually returns an int... def _a ( lowerCAmelCase_ , lowerCAmelCase_="no" ): """simple docstring""" _snake_case : List[str] = os.environ.get(lowerCAmelCase_ , str(lowerCAmelCase_ ) ) return value
47
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available, is_torch_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow if is_tf_available(): from transformers import ( AutoConfig, BertConfig, GPTaConfig, TaConfig, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST if is_torch_available(): from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelWithLMHead, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertModel, GPTaLMHeadModel, RobertaForMaskedLM, TaForConditionalGeneration, ) @is_pt_tf_cross_test class lowerCamelCase (unittest.TestCase ): @slow def UpperCAmelCase_ ( self ) -> List[Any]: """simple docstring""" for model_name in ["bert-base-uncased"]: _snake_case : Union[str, Any] = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Any = TFAutoModel.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : str = AutoModel.from_pretrained(lowercase__ , 
from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> List[str]: """simple docstring""" for model_name in ["bert-base-uncased"]: _snake_case : Optional[Any] = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Dict = TFAutoModelForPreTraining.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Tuple = AutoModelForPreTraining.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> Union[str, Any]: """simple docstring""" for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : Optional[int] = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : List[Any] = TFAutoModelForCausalLM.from_pretrained(lowercase__ , from_pt=lowercase__ ) _snake_case , _snake_case : Tuple = TFAutoModelForCausalLM.from_pretrained( lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained(lowercase__ , from_tf=lowercase__ ) _snake_case , _snake_case : Optional[Any] = AutoModelForCausalLM.from_pretrained( lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : List[Any] = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) 
_snake_case : Tuple = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : int = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> Any: """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : List[str] = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(lowercase__ , from_pt=lowercase__ ) _snake_case , _snake_case : List[str] = TFAutoModelForMaskedLM.from_pretrained( lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : int = AutoModelForMaskedLM.from_pretrained(lowercase__ , from_tf=lowercase__ ) _snake_case , _snake_case : Optional[int] = AutoModelForMaskedLM.from_pretrained( lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> List[str]: """simple docstring""" for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : List[str] = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase__ , from_pt=lowercase__ ) _snake_case , _snake_case : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained( lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : List[str] = 
AutoModelForSeqaSeqLM.from_pretrained(lowercase__ , from_tf=lowercase__ ) _snake_case , _snake_case : Dict = AutoModelForSeqaSeqLM.from_pretrained( lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> Dict: """simple docstring""" for model_name in ["bert-base-uncased"]: _snake_case : Any = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Any = TFAutoModelForSequenceClassification.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Dict = AutoModelForSequenceClassification.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> Optional[int]: """simple docstring""" for model_name in ["bert-base-uncased"]: _snake_case : str = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : str = TFAutoModelForQuestionAnswering.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Union[str, Any] = AutoModelForQuestionAnswering.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) def UpperCAmelCase_ ( self ) -> str: """simple docstring""" _snake_case : Union[str, Any] = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 ) _snake_case : Tuple = 
AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 ) def UpperCAmelCase_ ( self ) -> str: """simple docstring""" _snake_case : List[str] = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 ) _snake_case : int = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
47
1
'''simple docstring''' import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCamelCase (a__ ): def __init__( self , lowercase__ , lowercase__=13 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=False , lowercase__=False , lowercase__=False , lowercase__=2 , lowercase__=99 , lowercase__=0 , lowercase__=32 , lowercase__=5 , lowercase__=4 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=512 , lowercase__=12 , lowercase__=2 , lowercase__=0.02 , lowercase__=3 , lowercase__=4 , lowercase__="last" , lowercase__=None , lowercase__=None , ) -> Union[str, Any]: """simple docstring""" _snake_case : Union[str, Any] = parent _snake_case : int = batch_size _snake_case : Union[str, Any] = seq_length _snake_case : Tuple = is_training _snake_case : int = use_input_lengths _snake_case : List[str] = use_token_type_ids _snake_case : List[Any] = use_labels _snake_case : Union[str, Any] = gelu_activation _snake_case : int = sinusoidal_embeddings _snake_case : Union[str, Any] = causal _snake_case : Any = asm _snake_case : Union[str, Any] = n_langs _snake_case : int = vocab_size _snake_case : str = n_special _snake_case : Optional[int] = hidden_size _snake_case : Dict = num_hidden_layers _snake_case : Union[str, 
Any] = num_attention_heads _snake_case : Tuple = hidden_dropout_prob _snake_case : str = attention_probs_dropout_prob _snake_case : Tuple = max_position_embeddings _snake_case : List[str] = type_vocab_size _snake_case : Dict = type_sequence_label_size _snake_case : Dict = initializer_range _snake_case : Optional[int] = num_labels _snake_case : Tuple = num_choices _snake_case : List[str] = summary_type _snake_case : Union[str, Any] = use_proj _snake_case : List[Any] = scope def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" _snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _snake_case : str = random_attention_mask([self.batch_size, self.seq_length] ) _snake_case : Union[str, Any] = None if self.use_input_lengths: _snake_case : Union[str, Any] = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length _snake_case : Union[str, Any] = None if self.use_token_type_ids: _snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) _snake_case : Any = None _snake_case : Union[str, Any] = None _snake_case : List[Any] = None if self.use_labels: _snake_case : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _snake_case : Optional[Any] = ids_tensor([self.batch_size] , 2 ).float() _snake_case : str = ids_tensor([self.batch_size] , self.num_choices ) _snake_case : Union[str, Any] = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def UpperCAmelCase_ ( self ) -> str: """simple docstring""" return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , 
attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ) -> Tuple: """simple docstring""" _snake_case : List[Any] = FlaubertModel(config=lowercase__ ) model.to(lowercase__ ) model.eval() _snake_case : Dict = model(lowercase__ , lengths=lowercase__ , langs=lowercase__ ) _snake_case : Dict = model(lowercase__ , langs=lowercase__ ) _snake_case : Union[str, Any] = model(lowercase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ) -> Any: """simple docstring""" _snake_case : List[str] = FlaubertWithLMHeadModel(lowercase__ ) model.to(lowercase__ ) model.eval() _snake_case : Optional[int] = model(lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ) -> Tuple: """simple docstring""" _snake_case : List[Any] = FlaubertForQuestionAnsweringSimple(lowercase__ ) model.to(lowercase__ ) model.eval() _snake_case : List[str] = model(lowercase__ ) _snake_case : Tuple = model(lowercase__ , start_positions=lowercase__ , end_positions=lowercase__ ) self.parent.assertEqual(result.start_logits.shape , 
(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ) -> str: """simple docstring""" _snake_case : Union[str, Any] = FlaubertForQuestionAnswering(lowercase__ ) model.to(lowercase__ ) model.eval() _snake_case : int = model(lowercase__ ) _snake_case : Dict = model( lowercase__ , start_positions=lowercase__ , end_positions=lowercase__ , cls_index=lowercase__ , is_impossible=lowercase__ , p_mask=lowercase__ , ) _snake_case : str = model( lowercase__ , start_positions=lowercase__ , end_positions=lowercase__ , cls_index=lowercase__ , is_impossible=lowercase__ , ) ((_snake_case) , ) : Tuple = result_with_labels.to_tuple() _snake_case : List[Any] = model(lowercase__ , start_positions=lowercase__ , end_positions=lowercase__ ) ((_snake_case) , ) : Dict = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ) -> Dict: """simple docstring""" _snake_case : Any = FlaubertForSequenceClassification(lowercase__ ) model.to(lowercase__ ) model.eval() _snake_case : List[str] = model(lowercase__ ) _snake_case : str = model(lowercase__ , 
labels=lowercase__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ) -> Optional[Any]: """simple docstring""" _snake_case : Tuple = self.num_labels _snake_case : Union[str, Any] = FlaubertForTokenClassification(lowercase__ ) model.to(lowercase__ ) model.eval() _snake_case : Optional[int] = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ) -> int: """simple docstring""" _snake_case : str = self.num_choices _snake_case : List[Any] = FlaubertForMultipleChoice(config=lowercase__ ) model.to(lowercase__ ) model.eval() _snake_case : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _snake_case : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _snake_case : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _snake_case : Dict = model( lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase_ ( self ) -> str: """simple docstring""" _snake_case : Any = self.prepare_config_and_inputs() ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) : Optional[int] = config_and_inputs _snake_case : Dict = { '''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths, 
'''attention_mask''': input_mask, } return config, inputs_dict @require_torch class lowerCamelCase (a__ , a__ , unittest.TestCase ): _lowercase : Optional[int] = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) _lowercase : int = ( { """feature-extraction""": FlaubertModel, """fill-mask""": FlaubertWithLMHeadModel, """question-answering""": FlaubertForQuestionAnsweringSimple, """text-classification""": FlaubertForSequenceClassification, """token-classification""": FlaubertForTokenClassification, """zero-shot""": FlaubertForSequenceClassification, } if is_torch_available() else {} ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> List[str]: """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('''Fast''' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__=False ) -> List[Any]: """simple docstring""" _snake_case : Optional[int] = super()._prepare_for_class(lowercase__ , lowercase__ , return_labels=lowercase__ ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": _snake_case : List[Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowercase__ ) _snake_case : Dict = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowercase__ ) return inputs_dict def UpperCAmelCase_ ( self ) -> str: """simple docstring""" _snake_case : Dict = FlaubertModelTester(self ) _snake_case : Optional[int] = ConfigTester(self , config_class=lowercase__ , emb_dim=37 ) def UpperCAmelCase_ ( self ) -> Optional[int]: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase_ ( self ) -> Any: """simple docstring""" _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*lowercase__ ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: """simple docstring""" _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*lowercase__ ) def UpperCAmelCase_ ( self ) -> Any: """simple docstring""" _snake_case : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*lowercase__ ) def UpperCAmelCase_ ( self ) -> List[str]: """simple docstring""" _snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*lowercase__ ) def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" _snake_case : str = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_flaubert_sequence_classif(*lowercase__ ) def UpperCAmelCase_ ( self ) -> str: """simple docstring""" _snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*lowercase__ ) def UpperCAmelCase_ ( self ) -> int: """simple docstring""" _snake_case : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> Optional[int]: """simple docstring""" for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : List[str] = FlaubertModel.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) @slow @require_torch_gpu def UpperCAmelCase_ ( self ) -> Union[str, Any]: """simple docstring""" _snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == FlaubertForMultipleChoice: return _snake_case : List[str] = True _snake_case : List[Any] = model_class(config=lowercase__ ) _snake_case : str = self._prepare_for_class(lowercase__ , lowercase__ ) _snake_case : int = torch.jit.trace( lowercase__ , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(lowercase__ , os.path.join(lowercase__ , '''traced_model.pt''' ) ) _snake_case : Dict = torch.jit.load(os.path.join(lowercase__ , '''traced_model.pt''' ) , map_location=lowercase__ ) loaded(inputs_dict['''input_ids'''].to(lowercase__ ) , inputs_dict['''attention_mask'''].to(lowercase__ ) ) @require_torch class lowerCamelCase (unittest.TestCase ): @slow def UpperCAmelCase_ ( self ) -> List[str]: """simple docstring""" _snake_case : Tuple = FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''' ) _snake_case : int = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] ) with torch.no_grad(): _snake_case : str = model(lowercase__ )[0] _snake_case : int = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , lowercase__ ) _snake_case : List[Any] = torch.tensor( [[[-2.6_251, -1.4_298, -0.0_227], [-2.8_510, -1.6_387, 0.2_258], [-2.8_114, -1.1_832, -0.3_066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase__ , atol=1E-4 ) )
47
"""Lazy-import module wiring for the timm_backbone model (transformers style)."""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Map of submodule name -> public names, consumed by _LazyModule below.
UpperCAmelCase : Dict = {'configuration_timm_backbone': ['TimmBackboneConfig']}
# Fix: `_import_structure` was passed to _LazyModule but never defined
# (NameError at import time); keep the original binding and alias it.
_import_structure = UpperCAmelCase

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: simply don't expose the model class.
    pass
else:
    UpperCAmelCase : Union[str, Any] = ['TimmBackbone']
    # Fix: the model names were assigned to a throwaway and never
    # registered, so TimmBackbone was unreachable through the lazy module.
    _import_structure['modeling_timm_backbone'] = UpperCAmelCase

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_timm_backbone import TimmBackboneConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timm_backbone import TimmBackbone

else:
    import sys

    # Replace this module with a lazy proxy that imports on first access.
    UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
47
1
'''simple docstring''' def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" return "\n".join( f'''{number} * {i} = {number * i}''' for i in range(1 , number_of_terms + 1 ) ) if __name__ == "__main__": print(multiplication_table(number=5, number_of_terms=1_0))
47
'''simple docstring''' import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version UpperCAmelCase : Tuple = logging.getLogger(__name__) require_version('pytorch_lightning>=1.0.4') UpperCAmelCase : str = { 'base': AutoModel, 'sequence-classification': AutoModelForSequenceClassification, 'question-answering': AutoModelForQuestionAnswering, 'pretraining': AutoModelForPreTraining, 'token-classification': AutoModelForTokenClassification, 'language-modeling': AutoModelWithLMHead, 'summarization': AutoModelForSeqaSeqLM, 'translation': AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization UpperCAmelCase : Optional[Any] = { 'linear': get_linear_schedule_with_warmup, 'cosine': get_cosine_schedule_with_warmup, 'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup, 'polynomial': get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } UpperCAmelCase : Tuple = sorted(arg_to_scheduler.keys()) UpperCAmelCase : Optional[Any] = '{' + ', '.join(arg_to_scheduler_choices) + '}' class lowerCamelCase (pl.LightningModule ): def __init__( self , lowercase__ , lowercase__=None , lowercase__="base" , lowercase__=None , lowercase__=None , 
lowercase__=None , **lowercase__ , ) -> Optional[int]: """simple docstring""" super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(lowercase__ ) _snake_case : Union[str, Any] = 0 _snake_case : int = Path(self.hparams.output_dir ) _snake_case : int = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: _snake_case : Tuple = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'''num_labels''': num_labels} if num_labels is not None else {}) , cache_dir=lowercase__ , **lowercase__ , ) else: _snake_case : PretrainedConfig = config _snake_case : Optional[Any] = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''') for p in extra_model_params: if getattr(self.hparams , lowercase__ , lowercase__ ): assert hasattr(self.config , lowercase__ ), F'''model config doesn\'t have a `{p}` attribute''' setattr(self.config , lowercase__ , getattr(self.hparams , lowercase__ ) ) if tokenizer is None: _snake_case : Optional[int] = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=lowercase__ , ) else: _snake_case : PreTrainedTokenizer = tokenizer _snake_case : Any = MODEL_MODES[mode] if model is None: _snake_case : List[Any] = self.model_type.from_pretrained( self.hparams.model_name_or_path , from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=lowercase__ , ) else: _snake_case : Optional[Any] = model def UpperCAmelCase_ ( self , *lowercase__ , **lowercase__ ) -> List[str]: """simple docstring""" _snake_case : Dict = self.model_type.from_pretrained(*lowercase__ , **lowercase__ ) def UpperCAmelCase_ ( self ) -> List[Any]: """simple docstring""" _snake_case : Optional[int] = 
arg_to_scheduler[self.hparams.lr_scheduler] _snake_case : Optional[int] = get_schedule_func( self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() ) _snake_case : str = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1} return scheduler def UpperCAmelCase_ ( self ) -> int: """simple docstring""" _snake_case : Any = self.model _snake_case : List[Any] = ['''bias''', '''LayerNorm.weight'''] _snake_case : List[str] = [ { '''params''': [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check this named paramters '''weight_decay''': self.hparams.weight_decay, }, { '''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0, }, ] if self.hparams.adafactor: _snake_case : Any = Adafactor( lowercase__ , lr=self.hparams.learning_rate , scale_parameter=lowercase__ , relative_step=lowercase__ ) else: _snake_case : List[str] = AdamW( lowercase__ , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon ) _snake_case : List[str] = optimizer _snake_case : Any = self.get_lr_scheduler() return [optimizer], [scheduler] def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Any: """simple docstring""" return self.validation_step(lowercase__ , lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ ) -> Tuple: """simple docstring""" return self.validation_end(lowercase__ ) def UpperCAmelCase_ ( self ) -> int: """simple docstring""" _snake_case : Any = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores _snake_case : Optional[int] = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def UpperCAmelCase_ ( self , lowercase__ ) -> Any: """simple docstring""" if stage == "test": _snake_case : Any = len(self.test_dataloader().dataset ) else: _snake_case : Dict = self.get_dataloader('''train''' , 
self.hparams.train_batch_size , shuffle=lowercase__ ) _snake_case : Optional[int] = len(self.train_dataloader().dataset ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ = False ) -> str: """simple docstring""" raise NotImplementedError('''You must implement this for your task''' ) def UpperCAmelCase_ ( self ) -> Optional[int]: """simple docstring""" return self.train_loader def UpperCAmelCase_ ( self ) -> Dict: """simple docstring""" return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=lowercase__ ) def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ ) -> Optional[int]: """simple docstring""" return os.path.join( self.hparams.data_dir , '''cached_{}_{}_{}'''.format( lowercase__ , list(filter(lowercase__ , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , ) @pl.utilities.rank_zero_only def UpperCAmelCase_ ( self , lowercase__ ) -> None: """simple docstring""" _snake_case : Dict = self.output_dir.joinpath('''best_tfmr''' ) _snake_case : Tuple = self.step_count self.model.save_pretrained(lowercase__ ) self.tokenizer.save_pretrained(lowercase__ ) @staticmethod def UpperCAmelCase_ ( lowercase__ , lowercase__ ) -> Tuple: """simple docstring""" parser.add_argument( '''--model_name_or_path''' , default=lowercase__ , type=lowercase__ , required=lowercase__ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--config_name''' , default='''''' , type=lowercase__ , help='''Pretrained config name or path if not the same as model_name''' ) parser.add_argument( '''--tokenizer_name''' , default=lowercase__ , type=lowercase__ , help='''Pretrained tokenizer name or path if not the same as model_name''' , ) parser.add_argument( '''--cache_dir''' , default=str(Path(lowercase__ 
).parent / '''test_run''' / '''cache''' ) , type=lowercase__ , help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , ) parser.add_argument( '''--encoder_layerdrop''' , type=lowercase__ , help='''Encoder layer dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--decoder_layerdrop''' , type=lowercase__ , help='''Decoder layer dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--dropout''' , type=lowercase__ , help='''Dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--attention_dropout''' , type=lowercase__ , help='''Attention dropout probability (Optional). Goes into model.config''' , ) parser.add_argument('''--learning_rate''' , default=5E-5 , type=lowercase__ , help='''The initial learning rate for Adam.''' ) parser.add_argument( '''--lr_scheduler''' , default='''linear''' , choices=lowercase__ , metavar=lowercase__ , type=lowercase__ , help='''Learning rate scheduler''' , ) parser.add_argument('''--weight_decay''' , default=0.0 , type=lowercase__ , help='''Weight decay if we apply some.''' ) parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=lowercase__ , help='''Epsilon for Adam optimizer.''' ) parser.add_argument('''--warmup_steps''' , default=0 , type=lowercase__ , help='''Linear warmup over warmup_steps.''' ) parser.add_argument('''--num_workers''' , default=4 , type=lowercase__ , help='''kwarg passed to DataLoader''' ) parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=lowercase__ ) parser.add_argument('''--train_batch_size''' , default=32 , type=lowercase__ ) parser.add_argument('''--eval_batch_size''' , default=32 , type=lowercase__ ) parser.add_argument('''--adafactor''' , action='''store_true''' ) class lowerCamelCase (pl.Callback ): def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> str: """simple docstring""" if ( trainer.is_global_zero and 
trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. class lowerCamelCase (pl.Callback ): def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> List[str]: """simple docstring""" for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(lowercase__ ) class lowerCamelCase (pl.Callback ): def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Any: """simple docstring""" _snake_case : Any = trainer.lr_schedulers[0]['''scheduler'''] _snake_case : Optional[int] = {F'''lr_group_{i}''': lr for i, lr in enumerate(lr_scheduler.get_lr() )} pl_module.logger.log_metrics(lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> List[str]: """simple docstring""" rank_zero_info('''***** Validation results *****''' ) _snake_case : Dict = trainer.callback_metrics # Log results for key in sorted(lowercase__ ): if key not in ["log", "progress_bar"]: rank_zero_info('''{} = {}\n'''.format(lowercase__ , str(metrics[key] ) ) ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Dict: """simple docstring""" rank_zero_info('''***** Test results *****''' ) _snake_case : Dict = trainer.callback_metrics # Log and save results to file _snake_case : str = os.path.join(pl_module.hparams.output_dir , '''test_results.txt''' ) with open(lowercase__ , '''w''' ) as writer: for key in sorted(lowercase__ ): if key not in ["log", "progress_bar"]: rank_zero_info('''{} = {}\n'''.format(lowercase__ , str(metrics[key] ) ) ) writer.write('''{} = {}\n'''.format(lowercase__ , str(metrics[key] ) ) ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" parser.add_argument( '''--output_dir''' , default=str(Path(lowerCAmelCase_ ).parent / '''test_run''' / '''model_checkpoints''' ) , type=lowerCAmelCase_ , help='''The output directory where the model predictions and 
checkpoints will be written.''' , ) parser.add_argument( '''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , ) parser.add_argument( '''--fp16_opt_level''' , type=lowerCAmelCase_ , default='''O2''' , help=( '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].''' '''See details at https://nvidia.github.io/apex/amp.html''' ) , ) parser.add_argument('''--n_tpu_cores''' , dest='''tpu_cores''' , type=lowerCAmelCase_ ) parser.add_argument('''--max_grad_norm''' , dest='''gradient_clip_val''' , default=1.0 , type=lowerCAmelCase_ , help='''Max gradient norm''' ) parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' ) parser.add_argument('''--do_predict''' , action='''store_true''' , help='''Whether to run predictions on the test set.''' ) parser.add_argument( '''--gradient_accumulation_steps''' , dest='''accumulate_grad_batches''' , type=lowerCAmelCase_ , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , ) parser.add_argument('''--seed''' , type=lowerCAmelCase_ , default=42 , help='''random seed for initialization''' ) parser.add_argument( '''--data_dir''' , default=str(Path(lowerCAmelCase_ ).parent / '''test_run''' / '''dummy-train-data''' ) , type=lowerCAmelCase_ , help='''The input data dir. 
Should contain the training files for the CoNLL-2003 NER task.''' , ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=True , lowerCAmelCase_=[] , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ , ): """simple docstring""" pl.seed_everything(args.seed ) # init model _snake_case : Union[str, Any] = Path(model.hparams.output_dir ) odir.mkdir(exist_ok=lowerCAmelCase_ ) # add custom checkpoints if checkpoint_callback is None: _snake_case : Any = pl.callbacks.ModelCheckpoint( filepath=args.output_dir , prefix='''checkpoint''' , monitor='''val_loss''' , mode='''min''' , save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(lowerCAmelCase_ ) if logging_callback is None: _snake_case : str = LoggingCallback() _snake_case : Tuple = {} if args.fpaa: _snake_case : Union[str, Any] = 16 if args.gpus > 1: _snake_case : Optional[Any] = '''auto''' _snake_case : Tuple = '''ddp''' _snake_case : Optional[Any] = args.accumulate_grad_batches _snake_case : Tuple = None _snake_case : str = '''auto''' _snake_case : int = pl.Trainer.from_argparse_args( lowerCAmelCase_ , weights_summary=lowerCAmelCase_ , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=lowerCAmelCase_ , val_check_interval=1 , num_sanity_val_steps=2 , **lowerCAmelCase_ , ) if args.do_train: trainer.fit(lowerCAmelCase_ ) else: print('''RAG modeling tests with new set functions successfuly executed!''' ) return trainer
47
1
'''simple docstring''' def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : List[Any] = '''''' for i in table: res += inp[i - 1] return res def _a ( lowerCAmelCase_ ): """simple docstring""" return data[1:] + data[0] def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Union[str, Any] = '''''' for i in range(len(lowerCAmelCase_ ) ): if a[i] == b[i]: res += "0" else: res += "1" return res def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Dict = int('''0b''' + data[0] + data[-1] , 2 ) _snake_case : Union[str, Any] = int('''0b''' + data[1:3] , 2 ) return bin(s[row][col] )[2:] def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : int = message[:4] _snake_case : List[Any] = message[4:] _snake_case : List[Any] = apply_table(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : List[str] = xor(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Optional[Any] = apply_sbox(lowerCAmelCase_ , temp[:4] ) # noqa: E741 _snake_case : List[str] = apply_sbox(lowerCAmelCase_ , temp[4:] ) _snake_case : int = '''0''' * (2 - len(lowerCAmelCase_ )) + l # noqa: E741 _snake_case : Dict = '''0''' * (2 - len(lowerCAmelCase_ )) + r _snake_case : List[Any] = apply_table(l + r , lowerCAmelCase_ ) _snake_case : Dict = xor(lowerCAmelCase_ , lowerCAmelCase_ ) return temp + right if __name__ == "__main__": UpperCAmelCase : Union[str, Any] = input('Enter 10 bit key: ') UpperCAmelCase : Union[str, Any] = input('Enter 8 bit message: ') UpperCAmelCase : str = [6, 3, 7, 4, 8, 5, 1_0, 9] UpperCAmelCase : str = [3, 5, 2, 7, 4, 1_0, 1, 9, 8, 6] UpperCAmelCase : Dict = [2, 4, 3, 1] UpperCAmelCase : str = [2, 6, 3, 1, 4, 8, 5, 7] UpperCAmelCase : Union[str, Any] = [4, 1, 3, 5, 7, 2, 8, 6] UpperCAmelCase : List[str] = [4, 1, 2, 3, 2, 3, 4, 1] UpperCAmelCase : Dict = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]] UpperCAmelCase : 
Optional[int] = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]] # key generation UpperCAmelCase : Dict = apply_table(key, paa_table) UpperCAmelCase : Optional[int] = temp[:5] UpperCAmelCase : Dict = temp[5:] UpperCAmelCase : int = left_shift(left) UpperCAmelCase : str = left_shift(right) UpperCAmelCase : Dict = apply_table(left + right, pa_table) UpperCAmelCase : str = left_shift(left) UpperCAmelCase : str = left_shift(right) UpperCAmelCase : List[str] = left_shift(left) UpperCAmelCase : Union[str, Any] = left_shift(right) UpperCAmelCase : Optional[Any] = apply_table(left + right, pa_table) # encryption UpperCAmelCase : Tuple = apply_table(message, IP) UpperCAmelCase : Dict = function(expansion, sa, sa, keya, temp) UpperCAmelCase : List[Any] = temp[4:] + temp[:4] UpperCAmelCase : Union[str, Any] = function(expansion, sa, sa, keya, temp) UpperCAmelCase : Union[str, Any] = apply_table(temp, IP_inv) print('Cipher text is:', CT) # decryption UpperCAmelCase : List[Any] = apply_table(CT, IP) UpperCAmelCase : Union[str, Any] = function(expansion, sa, sa, keya, temp) UpperCAmelCase : Optional[int] = temp[4:] + temp[:4] UpperCAmelCase : Union[str, Any] = function(expansion, sa, sa, keya, temp) UpperCAmelCase : Optional[Any] = apply_table(temp, IP_inv) print('Plain text after decypting is:', PT)
47
"""SEW-D model configuration.

FIX: the class inherited from an undefined name `a__`; `PretrainedConfig` is
imported by this file and matches the `super().__init__(pad_token_id=...,
bos_token_id=..., eos_token_id=...)` call, so it is restored as the base.
The incoming `__init__` also repeated one parameter name for every argument
(a SyntaxError); keyword names are restored to match the attributes assigned
in the body.
"""
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}


class lowerCamelCase(PretrainedConfig):
    """Configuration class holding all hyper-parameters of a SEW-D model."""

    # NOTE(review): upstream calls this attribute `model_type`; the mangled
    # name is kept so any callers of the dump's convention still resolve.
    _lowercase = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        # The three conv specs describe the same stack of feature-extractor
        # layers, so they must have equal length.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def UpperCAmelCase_(self):
        """Product of all conv strides: raw-sample-to-encoder-frame ratio."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
47
1
'''simple docstring''' def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" if mass < 0: raise ValueError('''The mass of a body cannot be negative''' ) return 0.5 * mass * abs(lowerCAmelCase_ ) * abs(lowerCAmelCase_ ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
47
'''simple docstring''' from random import randint from tempfile import TemporaryFile import numpy as np def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : List[Any] = 0 if start < end: _snake_case : List[Any] = randint(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Any = a[end] _snake_case : List[str] = a[pivot] _snake_case : Optional[int] = temp _snake_case , _snake_case : List[Any] = _in_place_partition(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) count += _in_place_quick_sort(lowerCAmelCase_ , lowerCAmelCase_ , p - 1 ) count += _in_place_quick_sort(lowerCAmelCase_ , p + 1 , lowerCAmelCase_ ) return count def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[Any] = 0 _snake_case : Optional[int] = randint(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Tuple = a[end] _snake_case : Optional[Any] = a[pivot] _snake_case : Union[str, Any] = temp _snake_case : Union[str, Any] = start - 1 for index in range(lowerCAmelCase_ , lowerCAmelCase_ ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value _snake_case : Optional[int] = new_pivot_index + 1 _snake_case : Optional[Any] = a[new_pivot_index] _snake_case : Tuple = a[index] _snake_case : str = temp _snake_case : Any = a[new_pivot_index + 1] _snake_case : str = a[end] _snake_case : Optional[int] = temp return new_pivot_index + 1, count UpperCAmelCase : Dict = TemporaryFile() UpperCAmelCase : Dict = 1_0_0 # 1000 elements are to be sorted UpperCAmelCase, UpperCAmelCase : str = 0, 1 # mean and standard deviation UpperCAmelCase : Optional[Any] = np.random.normal(mu, sigma, p) np.save(outfile, X) print('The array is') print(X) outfile.seek(0) # using the same array UpperCAmelCase : int = np.load(outfile) UpperCAmelCase : Optional[int] = len(M) - 1 UpperCAmelCase : str = _in_place_quick_sort(M, 0, r) print( 'No of Comparisons for 100 elements selected from a standard normal 
distribution' 'is :' ) print(z)
47
1
'''simple docstring'''

from ...utils import is_torch_available, is_transformers_available


# Guarded re-export: the VQ-Diffusion pipeline needs both the optional
# `transformers` and `torch` dependencies, so only expose it when both import.
if is_transformers_available() and is_torch_available():
    from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
47
'''simple docstring'''

from ...utils import is_torch_available, is_transformers_available


# Guarded re-export (duplicate of the sibling __init__ stub above in this dump):
# only expose the pipeline when both optional dependencies are importable.
if is_transformers_available() and is_torch_available():
    from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
47
1
"""Lazy package init for the speech-encoder-decoder models.

FIX: every assignment in the incoming code reused one mangled variable name,
so the `_import_structure` mapping consumed by `_LazyModule` was never built
and the final reference raised NameError. The standard transformers
lazy-module pattern (visible in the `import sys` + `_LazyModule(...)` tail)
is restored.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


# Submodule name -> public names, consumed by _LazyModule below.
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules import on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
47
"""Entry point for the `transformers-cli` command-line tool.

FIX: the function was defined under a mangled name while the __main__ guard
called `main()` (NameError); the name is restored from the call site.
"""
from argparse import ArgumentParser

from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands


def main():
    """Parse CLI arguments, dispatch to the chosen subcommand, and run it."""
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        # No subcommand given: show help and exit with an error status.
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
47
1
'''simple docstring''' from __future__ import annotations import math def _a ( lowerCAmelCase_ ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(lowerCAmelCase_ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def _a ( lowerCAmelCase_ ): """simple docstring""" _snake_case : Dict = str(lowerCAmelCase_ ) _snake_case : Dict = [n] for i in range(1 , len(lowerCAmelCase_ ) ): list_nums.append(int(str_num[i:] ) ) list_nums.append(int(str_num[:-i] ) ) return list_nums def _a ( lowerCAmelCase_ ): """simple docstring""" if len(str(lowerCAmelCase_ ) ) > 3: if not is_prime(int(str(lowerCAmelCase_ )[-3:] ) ) or not is_prime(int(str(lowerCAmelCase_ )[:3] ) ): return False return True def _a ( lowerCAmelCase_ = 11 ): """simple docstring""" _snake_case : list[int] = [] _snake_case : str = 13 while len(lowerCAmelCase_ ) != count: if validate(lowerCAmelCase_ ): _snake_case : Dict = list_truncated_nums(lowerCAmelCase_ ) if all(is_prime(lowerCAmelCase_ ) for i in list_nums ): list_truncated_primes.append(lowerCAmelCase_ ) num += 2 return list_truncated_primes def _a ( ): """simple docstring""" return sum(compute_truncated_primes(11 ) ) if __name__ == "__main__": print(F"""{sum(compute_truncated_primes(1_1)) = }""")
47
"""Project Euler 25: index of the first Fibonacci term with n digits.

FIX: the generator was defined under the same mangled name as `solution`
(each def shadowed the other) while `solution` called
`fibonacci_generator()` (NameError); the name is restored from the call
site. Logic is unchanged.
"""
from collections.abc import Generator


def fibonacci_generator() -> Generator[int, None, None]:
    """Yield Fibonacci numbers 1, 2, 3, 5, 8, ... indefinitely."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    """Return the 1-based index of the first Fibonacci term with n decimal digits."""
    answer = 1
    gen = fibonacci_generator()
    # Count terms until one reaches n digits; +1 adjusts for the generator
    # starting at the second Fibonacci term.
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
47
1
'''simple docstring''' from __future__ import annotations import bisect def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 0 , lowerCAmelCase_ = -1 ): """simple docstring""" if hi < 0: _snake_case : List[str] = len(lowerCAmelCase_ ) while lo < hi: _snake_case : List[str] = lo + (hi - lo) // 2 if sorted_collection[mid] < item: _snake_case : List[Any] = mid + 1 else: _snake_case : str = mid return lo def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 0 , lowerCAmelCase_ = -1 ): """simple docstring""" if hi < 0: _snake_case : int = len(lowerCAmelCase_ ) while lo < hi: _snake_case : Tuple = lo + (hi - lo) // 2 if sorted_collection[mid] <= item: _snake_case : Any = mid + 1 else: _snake_case : List[Any] = mid return lo def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 0 , lowerCAmelCase_ = -1 ): """simple docstring""" sorted_collection.insert(bisect_left(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 0 , lowerCAmelCase_ = -1 ): """simple docstring""" sorted_collection.insert(bisect_right(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Dict = 0 _snake_case : int = len(lowerCAmelCase_ ) - 1 while left <= right: _snake_case : Union[str, Any] = left + (right - left) // 2 _snake_case : Dict = sorted_collection[midpoint] if current_item == item: return midpoint elif item < current_item: _snake_case : Any = midpoint - 1 else: _snake_case : Dict = midpoint + 1 return None def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Dict = bisect.bisect_left(lowerCAmelCase_ , lowerCAmelCase_ ) if index != len(lowerCAmelCase_ ) and sorted_collection[index] == item: return index return None def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" 
if right < left: return None _snake_case : Union[str, Any] = left + (right - left) // 2 if sorted_collection[midpoint] == item: return midpoint elif sorted_collection[midpoint] > item: return binary_search_by_recursion(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , midpoint - 1 ) else: return binary_search_by_recursion(lowerCAmelCase_ , lowerCAmelCase_ , midpoint + 1 , lowerCAmelCase_ ) if __name__ == "__main__": UpperCAmelCase : int = input('Enter numbers separated by comma:\n').strip() UpperCAmelCase : int = sorted(int(item) for item in user_input.split(',')) UpperCAmelCase : str = int(input('Enter a single number to be found in the list:\n')) UpperCAmelCase : List[Any] = binary_search(collection, target) if result is None: print(F"""{target} was not found in {collection}.""") else: print(F"""{target} was found at position {result} in {collection}.""")
47
"""Helper functions for training models with NVIDIA pytorch-quantization.

FIX: every function was defined under one mangled name while internal call
sites used `set_default_quantizers`, `fuse_qkv`, `clip_gelu`,
`recalibrate_weights`, `print_quant_summary`, `set_quantizer_by_name`
(defined later in the original file), etc. (NameError). The module
constants `name_width`/`quantizer_width` — read by fuse_qkv/clip_gelu —
were also clobbered into a single mangled name. Names are restored from
the call sites; logic is unchanged. Branch nesting follows the visible
structure; the collapsed input made some indentation ambiguous (noted
where relevant). The final `print_quant_summary` is truncated at the end
of this chunk and is reproduced only up to its visible body.
"""
import logging
import re

import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor


logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
quantizer_width = 70  # max width of quantizer names


def add_arguments(parser):
    """Register all quant_trainer command-line options on `parser`."""
    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
    group.add_argument(
        "--recalibrate-weights",
        action="store_true",
        help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ),
    )


def set_default_quantizers(args):
    """Install default input/weight QuantDescriptors on QuantLinear from CLI args."""
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    # Weights: per-tensor scaling if requested, otherwise per output channel (axis 0).
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)


def configure_model(model, args, calib=False, eval=False):
    """Enable/disable quantizers on `model` per CLI args (skipped during calibration)."""
    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")

    if not calib:
        # `set_quantizer_by_name` is defined later in this file.
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)
        if args.clip_gelu:
            clip_gelu(model, args.clip_gelu)

    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model)


def enable_calibration(model):
    """Switch every quantizer with a calibrator into calibration mode."""
    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")


def finish_calibration(model, args):
    """Load calibrated amax values and switch quantizers back to quant mode."""
    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model)


def fuse_qkv(model, args):
    """Give the q/k/v input quantizers (and, per-tensor, weight quantizers) one shared amax."""

    def fusea(qq, qk, qv):
        # Fuse by taking the max amax across the three quantizers.
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print(" WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()
        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fusea(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fusea(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)


def clip_gelu(model, maxval):
    """Clamp the input-quantizer amax of GELU output dense layers to `maxval`."""
    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")


def expand_amax(model):
    """Broadcast per-axis weight amax buffers to one value per output channel."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")


def recalibrate_weights(model):
    """Recompute weight amaxes from the current weights at the current granularity."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod.weight_quantizer, "_amax"):
                # FIX: this warning was a plain string with unexpanded {name} placeholders;
                # made it an f-string so the layer name is actually printed.
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue

            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax


def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Log each weighted layer's input/weight quantizer configuration, one line per layer."""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]

    # Widen the name column to the longest layer name (shadows the default).
    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))

    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        # `ignore` may also hold substrings of layer names.
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        wholeline = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(wholeline) <= line_width:
            logger.info(wholeline)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f"{' ':{name_width}} {wgt_str}")


def print_quant_summary(model):
    """Print every TensorQuantizer in the model. (Body truncated at chunk edge.)"""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
print(f'''{count} TensorQuantizers found in model''' ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) if quantizer_mod is not None: assert hasattr(lowerCAmelCase_ , lowerCAmelCase_ ) setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) else: logger.warning(f'''{name} has no {quantizer}''' ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="both" , **lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[Any] = f'''Warning: changing {which} quantizers of {name:{qname_width}}''' for k, v in kwargs.items(): s += f''' {k}={v}''' if which in ["input", "both"]: set_quantizer(lowerCAmelCase_ , lowerCAmelCase_ , '''_input_quantizer''' , lowerCAmelCase_ , lowerCAmelCase_ ) if which in ["weight", "both"]: set_quantizer(lowerCAmelCase_ , lowerCAmelCase_ , '''_weight_quantizer''' , lowerCAmelCase_ , lowerCAmelCase_ ) logger.info(lowerCAmelCase_ ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ): """simple docstring""" for name, mod in model.named_modules(): if hasattr(lowerCAmelCase_ , '''_input_quantizer''' ) or hasattr(lowerCAmelCase_ , '''_weight_quantizer''' ): for n in names: if re.search(lowerCAmelCase_ , lowerCAmelCase_ ): set_quantizers(lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ) elif name.endswith('''_quantizer''' ): for n in names: if re.search(lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case : Any = f'''Warning: changing {name:{name_width}}''' for k, v in kwargs.items(): s += f''' {k}={v}''' setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) logger.info(lowerCAmelCase_ )
47
1
"""TensorFlow activation functions keyed by name.

Restored from mangled source: every def was renamed ``_a`` (each shadowing the
previous) while the alias assignments and the ``ACT2FN`` mapping referenced the
original names, so the module raised ``NameError`` at import time.

Each activation takes a tensor-like input and returns a ``tf.Tensor`` of the
same shape; :func:`get_tf_activation` resolves a string name to the callable.
"""
import math

import tensorflow as tf
from packaging import version


def _gelu(x):
    """Exact GELU: x * Phi(x), computed with the error function."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """Tanh-based GELU approximation (the GPT-2 variant)."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044_715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    """Mish activation: x * tanh(softplus(x))."""
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    """Fast tanh GELU approximation with a precomputed sqrt(2/pi) constant."""
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044_715, x.dtype)
    coeff2 = tf.cast(0.7_978_845_608, x.dtype)  # sqrt(2 / pi)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    """Sigmoid-based GELU approximation: x * sigmoid(1.702 * x)."""
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """GELU with outputs clipped to [-10, 10] (useful for quantized ranges)."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated Linear Unit: split ``x`` in two halves along ``axis``, gate with sigmoid."""
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        # From TF 2.4 Keras' gelu supports the tanh approximation natively.
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new


ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    """Return the activation callable registered under ``activation_string``.

    Raises:
        KeyError: if the name is not present in ``ACT2FN``.
    """
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
47
'''simple docstring''' from __future__ import annotations def _a ( lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ): """simple docstring""" if start is None: _snake_case : Optional[Any] = 0 if end is None: _snake_case : Any = len(lowerCAmelCase_ ) - 1 if start >= end: return _snake_case : Optional[Any] = (start + end) // 2 slowsort(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) slowsort(lowerCAmelCase_ , mid + 1 , lowerCAmelCase_ ) if sequence[end] < sequence[mid]: _snake_case , _snake_case : int = sequence[mid], sequence[end] slowsort(lowerCAmelCase_ , lowerCAmelCase_ , end - 1 ) if __name__ == "__main__": from doctest import testmod testmod()
47
1
'''simple docstring''' from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class lowerCamelCase : def __init__( self , lowercase__ , ) -> List[str]: """simple docstring""" _snake_case : Optional[Any] = parent _snake_case : Union[str, Any] = 13 _snake_case : Dict = 7 _snake_case : Optional[int] = 30 _snake_case : Optional[Any] = self.seq_length + self.mem_len _snake_case : Optional[Any] = 15 _snake_case : Optional[int] = True _snake_case : Union[str, Any] = True _snake_case : Dict = 99 _snake_case : Optional[Any] = [10, 50, 80] _snake_case : List[str] = 32 _snake_case : str = 32 _snake_case : Tuple = 4 _snake_case : Tuple = 8 _snake_case : Dict = 128 _snake_case : str = 2 _snake_case : Tuple = 2 _snake_case : Tuple = None _snake_case : Dict = 1 _snake_case : Optional[Any] = 0 _snake_case : List[str] = 3 _snake_case : Dict = self.vocab_size - 1 _snake_case : Tuple = 0.01 def UpperCAmelCase_ ( self ) -> int: """simple docstring""" _snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _snake_case : List[Any] = None if self.use_labels: _snake_case : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _snake_case : List[str] = TransfoXLConfig( vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , 
n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , ) return (config, input_ids_a, input_ids_a, lm_labels) def UpperCAmelCase_ ( self ) -> Optional[int]: """simple docstring""" random.seed(self.seed ) tf.random.set_seed(self.seed ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Optional[Any]: """simple docstring""" _snake_case : Optional[int] = TFTransfoXLModel(lowercase__ ) _snake_case , _snake_case : List[Any] = model(lowercase__ ).to_tuple() _snake_case : Union[str, Any] = {'''input_ids''': input_ids_a, '''mems''': mems_a} _snake_case , _snake_case : Tuple = model(lowercase__ ).to_tuple() self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Optional[Any]: """simple docstring""" _snake_case : str = TFTransfoXLLMHeadModel(lowercase__ ) _snake_case , _snake_case : Dict = model(lowercase__ ).to_tuple() _snake_case : int = {'''input_ids''': input_ids_a, '''labels''': lm_labels} _snake_case , _snake_case : Optional[int] = model(lowercase__ ).to_tuple() _snake_case , _snake_case : List[str] = model([input_ids_a, mems_a] ).to_tuple() _snake_case : Optional[Any] = {'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels} _snake_case , _snake_case : List[str] = model(lowercase__ ).to_tuple() 
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Tuple: """simple docstring""" _snake_case : Optional[Any] = TFTransfoXLForSequenceClassification(lowercase__ ) _snake_case : Dict = model(lowercase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase_ ( self ) -> Dict: """simple docstring""" _snake_case : Optional[Any] = self.prepare_config_and_inputs() ((_snake_case) , (_snake_case) , (_snake_case) , (_snake_case)) : Tuple = config_and_inputs _snake_case : int = {'''input_ids''': input_ids_a} return config, inputs_dict @require_tf class lowerCamelCase (a__ , a__ , unittest.TestCase ): _lowercase : List[str] = ( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) _lowercase : Tuple = () if is_tf_available() else () _lowercase : Union[str, Any] = ( { """feature-extraction""": TFTransfoXLModel, """text-classification""": TFTransfoXLForSequenceClassification, """text-generation""": TFTransfoXLLMHeadModel, """zero-shot""": TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented _lowercase : Optional[Any] = False _lowercase : Tuple = False _lowercase : Optional[int] = False _lowercase : List[str] = False def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Optional[Any]: """simple docstring""" if 
pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. return True return False def UpperCAmelCase_ ( self ) -> Tuple: """simple docstring""" _snake_case : List[Any] = TFTransfoXLModelTester(self ) _snake_case : Any = ConfigTester(self , config_class=lowercase__ , d_embed=37 ) def UpperCAmelCase_ ( self ) -> List[str]: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase_ ( self ) -> List[Any]: """simple docstring""" self.model_tester.set_seed() _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*lowercase__ ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: """simple docstring""" self.model_tester.set_seed() _snake_case : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*lowercase__ ) def UpperCAmelCase_ ( self ) -> str: """simple docstring""" _snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*lowercase__ ) def UpperCAmelCase_ ( self ) -> List[Any]: """simple docstring""" _snake_case , _snake_case : int = self.model_tester.prepare_config_and_inputs_for_common() _snake_case : Optional[Any] = [TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: _snake_case : Optional[Any] = model_class(lowercase__ ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class in list_other_models_with_output_ebd: _snake_case : List[Any] = model.get_output_embeddings() assert isinstance(lowercase__ , tf.keras.layers.Layer ) _snake_case : str = model.get_bias() assert name is None else: _snake_case : Optional[Any] = model.get_output_embeddings() assert x is None _snake_case : List[str] = 
model.get_bias() assert name is None def UpperCAmelCase_ ( self ) -> Optional[int]: """simple docstring""" pass @slow def UpperCAmelCase_ ( self ) -> Optional[int]: """simple docstring""" for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : List[str] = TFTransfoXLModel.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) @unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' ) def UpperCAmelCase_ ( self ) -> List[str]: """simple docstring""" pass @require_tf class lowerCamelCase (unittest.TestCase ): @unittest.skip('''Skip test until #12651 is resolved.''' ) @slow def UpperCAmelCase_ ( self ) -> Any: """simple docstring""" _snake_case : Any = TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' ) # fmt: off _snake_case : Union[str, Any] = tf.convert_to_tensor([[33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . # Rasputin has a vision and denounces one of the men as a horse thief . 
Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . <eod> </s> <eos> # fmt: off _snake_case : Optional[int] = [33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0,33,1,1_857,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,28,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. 
The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> _snake_case : str = model.generate(lowercase__ , max_length=200 , do_sample=lowercase__ ) self.assertListEqual(output_ids[0].numpy().tolist() , lowercase__ )
47
'''simple docstring''' import unittest from transformers import is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow if is_flax_available(): import optax from flax.training.common_utils import onehot from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration from transformers.models.ta.modeling_flax_ta import shift_tokens_right @require_torch @require_sentencepiece @require_tokenizers @require_flax class lowerCamelCase (unittest.TestCase ): @slow def UpperCAmelCase_ ( self ) -> int: """simple docstring""" _snake_case : Tuple = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' ) _snake_case : Any = AutoTokenizer.from_pretrained('''google/mt5-small''' ) _snake_case : List[str] = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids _snake_case : Dict = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids _snake_case : Any = shift_tokens_right(lowercase__ , model.config.pad_token_id , model.config.decoder_start_token_id ) _snake_case : Any = model(lowercase__ , decoder_input_ids=lowercase__ ).logits _snake_case : Tuple = optax.softmax_cross_entropy(lowercase__ , onehot(lowercase__ , logits.shape[-1] ) ).mean() _snake_case : Tuple = -(labels.shape[-1] * loss.item()) _snake_case : Union[str, Any] = -84.9_127 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
47
1
'''simple docstring''' from random import randint from tempfile import TemporaryFile import numpy as np def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : List[Any] = 0 if start < end: _snake_case : List[Any] = randint(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Any = a[end] _snake_case : List[str] = a[pivot] _snake_case : Optional[int] = temp _snake_case , _snake_case : List[Any] = _in_place_partition(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) count += _in_place_quick_sort(lowerCAmelCase_ , lowerCAmelCase_ , p - 1 ) count += _in_place_quick_sort(lowerCAmelCase_ , p + 1 , lowerCAmelCase_ ) return count def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[Any] = 0 _snake_case : Optional[int] = randint(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Tuple = a[end] _snake_case : Optional[Any] = a[pivot] _snake_case : Union[str, Any] = temp _snake_case : Union[str, Any] = start - 1 for index in range(lowerCAmelCase_ , lowerCAmelCase_ ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value _snake_case : Optional[int] = new_pivot_index + 1 _snake_case : Optional[Any] = a[new_pivot_index] _snake_case : Tuple = a[index] _snake_case : str = temp _snake_case : Any = a[new_pivot_index + 1] _snake_case : str = a[end] _snake_case : Optional[int] = temp return new_pivot_index + 1, count UpperCAmelCase : Dict = TemporaryFile() UpperCAmelCase : Dict = 1_0_0 # 1000 elements are to be sorted UpperCAmelCase, UpperCAmelCase : str = 0, 1 # mean and standard deviation UpperCAmelCase : Optional[Any] = np.random.normal(mu, sigma, p) np.save(outfile, X) print('The array is') print(X) outfile.seek(0) # using the same array UpperCAmelCase : int = np.load(outfile) UpperCAmelCase : Optional[int] = len(M) - 1 UpperCAmelCase : str = _in_place_quick_sort(M, 0, r) print( 'No of Comparisons for 100 elements selected from a standard normal 
distribution' 'is :' ) print(z)
47
'''simple docstring''' import pickle import unittest import torch from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils import require_cpu @require_cpu class lowerCamelCase (unittest.TestCase ): def UpperCAmelCase_ ( self ) -> Union[str, Any]: """simple docstring""" _snake_case : Any = torch.nn.Linear(10 , 10 ) _snake_case : Optional[int] = torch.optim.SGD(model.parameters() , 0.1 ) _snake_case : List[str] = Accelerator() _snake_case : Optional[Any] = accelerator.prepare(lowercase__ ) try: pickle.loads(pickle.dumps(lowercase__ ) ) except Exception as e: self.fail(F'''Accelerated optimizer pickling failed with {e}''' ) AcceleratorState._reset_state()
47
1
"""Deduplicate identical initializer tensors in an ONNX model.

Restored from mangled source: the defs were renamed ``_a`` while internal
call sites kept the original helper names (``NameError`` at runtime), the
dedup loop added the function argument to ``dup_set`` instead of the indices,
and the graph rewrite received the same placeholder for old and new names.
"""
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    """Return True if two TensorProtos are equal, ignoring their names.

    Temporarily blanks both names so protobuf equality compares only the
    tensor payload, then restores them.
    """
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    """Replace every input called ``name`` on ``node_proto`` with ``new_name``.

    Recurses into the subgraphs of If (both branches) and Loop nodes.
    """
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            # insert-then-pop keeps the input position stable.
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    """Replace input ``name`` with ``new_name`` on every node of the graph."""
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    """Drop duplicate initializers and rewire their consumers.

    Args:
        model: reference model (read-only here).
        model_without_ext: model to modify in place.
        ind_to_replace: list of (duplicate_index, kept_index) pairs; each
            duplicate index must be greater than the index it maps to.
    """
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        # Remove the duplicate and point its consumers at the kept tensor.
        model_without_ext.graph.initializer.remove(inits[i])
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """Load an ONNX model, merge duplicate initializers, save and return the new path.

    The optimized model is written next to the input as ``optimized_<name>``.

    Returns:
        Path of the optimized model file.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                # Track how much memory the duplicate occupies.
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:  # float32
                    mem_size *= 4
                elif dtype == 6:  # int32
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:  # int64 / double
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print("total reduced size: ", total_reduced_size / 1_024 / 1_024 / 1_024, "GB")
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)
    return new_model
47
'''simple docstring''' UpperCAmelCase : Union[str, Any] = tuple[float, float, float] UpperCAmelCase : int = tuple[float, float, float] def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : str = end_pointa[0] - end_pointa[0] _snake_case : Tuple = end_pointa[1] - end_pointa[1] _snake_case : Any = end_pointa[2] - end_pointa[2] return (x, y, z) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Dict = ab[1] * ac[2] - ab[2] * ac[1] # *i _snake_case : List[str] = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j _snake_case : Optional[int] = ab[0] * ac[1] - ab[1] * ac[0] # *k return (x, y, z) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" return tuple(round(lowerCAmelCase_ , lowerCAmelCase_ ) for x in vector ) == (0, 0, 0) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 10 ): """simple docstring""" _snake_case : str = create_vector(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Tuple = create_vector(lowerCAmelCase_ , lowerCAmelCase_ ) return is_zero_vector(get_ad_vectors_cross(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ )
47
1
'''simple docstring''' from abc import ABC, abstractmethod from typing import Optional, Union from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit from ..utils.typing import NestedDataStructureLike, PathLike class lowerCamelCase (a__ ): def __init__( self , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = False , lowercase__ = False , lowercase__ = None , **lowercase__ , ) -> Any: """simple docstring""" _snake_case : List[Any] = path_or_paths _snake_case : Union[str, Any] = split if split or isinstance(lowercase__ , lowercase__ ) else '''train''' _snake_case : Any = features _snake_case : Union[str, Any] = cache_dir _snake_case : int = keep_in_memory _snake_case : int = streaming _snake_case : Dict = num_proc _snake_case : str = kwargs @abstractmethod def UpperCAmelCase_ ( self ) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]: """simple docstring""" pass class lowerCamelCase (a__ ): def __init__( self , lowercase__ = None , lowercase__ = None , lowercase__ = False , lowercase__ = False , lowercase__ = None , **lowercase__ , ) -> List[Any]: """simple docstring""" _snake_case : Any = features _snake_case : int = cache_dir _snake_case : Any = keep_in_memory _snake_case : str = streaming _snake_case : List[Any] = num_proc _snake_case : Dict = kwargs @abstractmethod def UpperCAmelCase_ ( self ) -> Union[Dataset, IterableDataset]: """simple docstring""" pass
47
# NOTE(review): Machine-transformed (identifier-mangled, newline-flattened)
# residue of a GPT-2 "bertology"-style head-masking/pruning script: it
# evidently computes per-head attention entropy and gradient-based importance,
# masks the least-important heads while a score threshold holds, then actually
# prunes them and compares parameter count / timing.
# Concrete defects to confirm before any reuse:
#   - every function is named `_a` and every parameter `lowerCAmelCase_`;
#     duplicate parameter names make each `def` a SyntaxError in Python.
#   - bodies still reference the original, now-unbound names (`args`, `model`,
#     `tensor`, `p`, `unlogit`, `eval_dataloader`, ...).
#   - statement newlines were collapsed, so the text below does not parse
#     as-is; it is kept byte-identical for provenance rather than "fixed"
#     blindly.
'''simple docstring''' import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel UpperCAmelCase : List[str] = logging.getLogger(__name__) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" if os.path.exists(lowerCAmelCase_ ): if os.path.exists(os.path.join(lowerCAmelCase_ , '''config.json''' ) ) and os.path.isfile( os.path.join(lowerCAmelCase_ , '''config.json''' ) ): os.remove(os.path.join(lowerCAmelCase_ , '''config.json''' ) ) if os.path.exists(os.path.join(lowerCAmelCase_ , '''pytorch_model.bin''' ) ) and os.path.isfile( os.path.join(lowerCAmelCase_ , '''pytorch_model.bin''' ) ): os.remove(os.path.join(lowerCAmelCase_ , '''pytorch_model.bin''' ) ) else: os.makedirs(lowerCAmelCase_ ) model.save_pretrained(lowerCAmelCase_ ) def _a ( lowerCAmelCase_ , lowerCAmelCase_=False ): """simple docstring""" _snake_case : Optional[Any] = 2 if unlogit: _snake_case : Any = torch.pow(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Union[str, Any] = p * torch.log(lowerCAmelCase_ ) _snake_case : Optional[Any] = 0 return -plogp.sum(dim=-1 ) def _a ( lowerCAmelCase_ ): """simple docstring""" logger.info('''lv, h >\t''' + '''\t'''.join(f'''{x + 1}''' for x in range(len(lowerCAmelCase_ ) ) ) ) for row in range(len(lowerCAmelCase_ ) ): if tensor.dtype != torch.long: logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:.5f}''' for x in tensor[row].cpu().data ) ) else: logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:d}''' for x in tensor[row].cpu().data ) ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=None , lowerCAmelCase_=False ): """simple docstring""" _snake_case , _snake_case : Optional[int] = model.config.num_hidden_layers, model.config.num_attention_heads
_snake_case : Tuple = torch.zeros(lowerCAmelCase_ , lowerCAmelCase_ ).to(args.device ) _snake_case : Union[str, Any] = torch.zeros(lowerCAmelCase_ , lowerCAmelCase_ ).to(args.device ) if head_mask is None: _snake_case : int = torch.ones(lowerCAmelCase_ , lowerCAmelCase_ ).to(args.device ) head_mask.requires_grad_(requires_grad=lowerCAmelCase_ ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: _snake_case : Dict = None _snake_case : Dict = 0.0 _snake_case : Optional[int] = 0.0 for step, inputs in enumerate(tqdm(lowerCAmelCase_ , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ): _snake_case : List[Any] = tuple(t.to(args.device ) for t in inputs ) ((_snake_case) , ) : Optional[Any] = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) _snake_case : Any = model(lowerCAmelCase_ , labels=lowerCAmelCase_ , head_mask=lowerCAmelCase_ ) # (loss), lm_logits, presents, (all hidden_states), (attentions) _snake_case , _snake_case , _snake_case : List[Any] = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(lowerCAmelCase_ ): _snake_case : Union[str, Any] = entropy(attn.detach() , lowerCAmelCase_ ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(lowerCAmelCase_ ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: _snake_case : Any = 2 _snake_case : List[str] = torch.pow(torch.pow(lowerCAmelCase_ , lowerCAmelCase_ ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) +
1E-20 if not args.dont_normalize_global_importance: _snake_case : Optional[int] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info('''Attention entropies''' ) print_ad_tensor(lowerCAmelCase_ ) if compute_importance: logger.info('''Head importance scores''' ) print_ad_tensor(lowerCAmelCase_ ) logger.info('''Head ranked by importance scores''' ) _snake_case : str = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) _snake_case : List[Any] = torch.arange( head_importance.numel() , device=args.device ) _snake_case : List[Any] = head_ranks.view_as(lowerCAmelCase_ ) print_ad_tensor(lowerCAmelCase_ ) return attn_entropy, head_importance, total_loss def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case , _snake_case , _snake_case : str = compute_heads_importance(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , compute_entropy=lowerCAmelCase_ ) _snake_case : Optional[Any] = 1 / loss # instead of downsteam score use the LM loss logger.info('''Pruning: original score: %f, threshold: %f''' , lowerCAmelCase_ , original_score * args.masking_threshold ) _snake_case : int = torch.ones_like(lowerCAmelCase_ ) _snake_case : Optional[Any] = max(1 , int(new_head_mask.numel() * args.masking_amount ) ) _snake_case : int = original_score while current_score >= original_score * args.masking_threshold: _snake_case : int = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads _snake_case : Dict = float('''Inf''' ) _snake_case : Optional[Any] = head_importance.view(-1 ).sort()[1] if len(lowerCAmelCase_ ) <= num_to_mask: print('''BREAK BY num_to_mask''' ) break # mask heads _snake_case : Union[str, Any] = current_heads_to_mask[:num_to_mask] logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) ) _snake_case : Tuple = new_head_mask.view(-1 )
_snake_case : List[str] = 0.0 _snake_case : str = new_head_mask.view_as(lowerCAmelCase_ ) _snake_case : Dict = new_head_mask.clone().detach() print_ad_tensor(lowerCAmelCase_ ) # Compute metric and head importance again _snake_case , _snake_case , _snake_case : Any = compute_heads_importance( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , compute_entropy=lowerCAmelCase_ , head_mask=lowerCAmelCase_ ) _snake_case : int = 1 / loss logger.info( '''Masking: current score: %f, remaining heads %d (%.1f percents)''' , lowerCAmelCase_ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , ) logger.info('''Final head mask''' ) print_ad_tensor(lowerCAmelCase_ ) np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() ) return head_mask def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[Any] = datetime.now() _snake_case , _snake_case , _snake_case : Union[str, Any] = compute_heads_importance( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , compute_entropy=lowerCAmelCase_ , compute_importance=lowerCAmelCase_ , head_mask=lowerCAmelCase_ ) _snake_case : Tuple = 1 / loss _snake_case : Dict = datetime.now() - before_time _snake_case : List[Any] = sum(p.numel() for p in model.parameters() ) _snake_case : int = { layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(lowerCAmelCase_ ) ) } for k, v in heads_to_prune.items(): if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case : Union[str, Any] = [ v, ] assert sum(len(lowerCAmelCase_ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(lowerCAmelCase_ ) _snake_case : List[str] = sum(p.numel() for p in model.parameters() ) _snake_case : int = datetime.now() _snake_case , _snake_case , _snake_case : Optional[Any] = compute_heads_importance( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ,
compute_entropy=lowerCAmelCase_ , compute_importance=lowerCAmelCase_ , head_mask=lowerCAmelCase_ , actually_pruned=lowerCAmelCase_ , ) _snake_case : Optional[int] = 1 / loss _snake_case : Dict = datetime.now() - before_time logger.info( '''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' , lowerCAmelCase_ , lowerCAmelCase_ , pruned_num_params / original_num_params * 100 , ) logger.info('''Pruning: score with masking: %f score with pruning: %f''' , lowerCAmelCase_ , lowerCAmelCase_ ) logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 100 ) save_model(lowerCAmelCase_ , args.output_dir ) def _a ( ): """simple docstring""" _snake_case : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--data_dir''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''' , ) parser.add_argument( '''--model_name_or_path''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--output_dir''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''The output directory where the model predictions and checkpoints will be written.''' , ) # Other parameters parser.add_argument( '''--config_name''' , default='''''' , type=lowerCAmelCase_ , help='''Pretrained config name or path if not the same as model_name_or_path''' , ) parser.add_argument( '''--tokenizer_name''' , default='''''' , type=lowerCAmelCase_ , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , ) parser.add_argument( '''--cache_dir''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , help='''Where do you want to store the pre-trained models downloaded from s3''' , ) parser.add_argument(
'''--data_subset''' , type=lowerCAmelCase_ , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' ) parser.add_argument( '''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' ) parser.add_argument( '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' ) parser.add_argument( '''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' ) parser.add_argument( '''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , ) parser.add_argument( '''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' ) parser.add_argument( '''--masking_threshold''' , default=0.9 , type=lowerCAmelCase_ , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , ) parser.add_argument( '''--masking_amount''' , default=0.1 , type=lowerCAmelCase_ , help='''Amount to heads to masking at each masking step.''' ) parser.add_argument('''--metric_name''' , default='''acc''' , type=lowerCAmelCase_ , help='''Metric to use for head masking.''' ) parser.add_argument( '''--max_seq_length''' , default=128 , type=lowerCAmelCase_ , help=( '''The maximum total input sequence length after WordPiece tokenization.
\n''' '''Sequences longer than this will be truncated, sequences shorter padded.''' ) , ) parser.add_argument('''--batch_size''' , default=1 , type=lowerCAmelCase_ , help='''Batch size.''' ) parser.add_argument('''--seed''' , type=lowerCAmelCase_ , default=42 ) parser.add_argument('''--local_rank''' , type=lowerCAmelCase_ , default=-1 , help='''local_rank for distributed training on gpus''' ) parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' ) parser.add_argument('''--server_ip''' , type=lowerCAmelCase_ , default='''''' , help='''Can be used for distant debugging.''' ) parser.add_argument('''--server_port''' , type=lowerCAmelCase_ , default='''''' , help='''Can be used for distant debugging.''' ) _snake_case : Optional[Any] = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('''Waiting for debugger attach''' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=lowerCAmelCase_ ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: _snake_case : str = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' ) _snake_case : Optional[Any] = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) _snake_case : List[str] = torch.device('''cuda''' , args.local_rank ) _snake_case : int = 1 torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) _snake_case : Optional[Any] = GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel
training model.to(args.device ) if args.local_rank != -1: _snake_case : Optional[int] = nn.parallel.DistributedDataParallel( lowerCAmelCase_ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=lowerCAmelCase_ ) elif args.n_gpu > 1: _snake_case : List[Any] = nn.DataParallel(lowerCAmelCase_ ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=lowerCAmelCase_ ) torch.save(lowerCAmelCase_ , os.path.join(args.output_dir , '''run_args.bin''' ) ) logger.info('''Training/evaluation parameters %s''' , lowerCAmelCase_ ) # Prepare dataset _snake_case : Dict = np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) _snake_case : int = (torch.from_numpy(lowerCAmelCase_ ),) _snake_case : Tuple = TensorDataset(*lowerCAmelCase_ ) _snake_case : List[str] = RandomSampler(lowerCAmelCase_ ) _snake_case : Dict = DataLoader(lowerCAmelCase_ , sampler=lowerCAmelCase_ , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: _snake_case : Optional[int] = mask_heads(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) prune_heads(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) if __name__ == "__main__": main()
47
1
'''simple docstring'''


def actual_power(a, b):
    """Return ``a ** b`` by divide-and-conquer (binary) exponentiation.

    Restored from mangled residue in which both functions were named ``_a``
    and both parameters ``lowerCAmelCase_`` (duplicate argument names are a
    SyntaxError) while the bodies referenced ``a``, ``b``, ``actual_power``
    and ``power``.

    Fix: the original evaluated ``actual_power(a, int(b / 2))`` twice per
    level, making the recursion O(b) multiplications; computing the half
    power once restores the intended O(log b).

    ``int(b / 2)`` truncates toward zero, so the recursion also terminates
    for negative ``b`` (where it effectively computes ``a ** -b`` — the
    reciprocal is taken by ``power`` below).
    """
    if b == 0:
        return 1
    half = actual_power(a, int(b / 2))
    if (b % 2) == 0:
        return half * half
    return a * half * half


def power(a, b):
    """Return ``a ** b`` for any integer exponent.

    Negative exponents yield the float reciprocal ``1 / a**-b``.
    """
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
47
'''simple docstring'''


def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number (0, 1, 1, 2, 3, 5, ...).

    Restored from mangled residue (functions renamed ``_a``, parameters
    ``lowerCAmelCase_`` while bodies referenced ``n``, ``fibonacci``, ...).

    NOTE(review): the original guard returns 0 for ``n == 1`` and for
    non-int input; preserved for backward compatibility even though the
    mathematical F(1) is 1.
    """
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number (checked from index 3,
    as the original did) whose decimal representation has ``n`` digits.

    Fix: the original called ``fibonacci(index)`` from scratch on every
    iteration, doing O(index**2) bignum additions overall; advancing the
    Fibonacci pair incrementally gives the same sequence of checks in
    O(index) additions.
    """
    index = 2
    fib_prev, fib_curr = 1, 1  # F(1), F(2)
    digits = 0
    while digits < n:
        index += 1
        fib_prev, fib_curr = fib_curr, fib_prev + fib_curr
        digits = len(str(fib_curr))
    return index


def solution(n: int = 1_000) -> int:
    """Project Euler 25: index of the first Fibonacci number with n digits."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
47
1
'''simple docstring'''
import math  # kept: part of the original file's import block


def solution(n: int = 100) -> int:
    """Project Euler 6: difference between the square of the sum and the
    sum of the squares of the first ``n`` natural numbers.

    Restored from mangled residue (``def _a(lowerCAmelCase_=100)`` whose body
    referenced ``n`` and a ``solution`` name that no longer existed).

    Fix: the original computed the square of the sum via
    ``int(math.pow(..., 2))``, which routes exact integer arithmetic through
    floating point and silently loses precision once the sum exceeds 2**53;
    pure integer arithmetic is exact for any ``n``.
    """
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = sum(range(1, n + 1)) ** 2
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"""{solution() = }""")
47
# NOTE(review): Machine-transformed residue of an LRU cache implementation:
# a doubly linked list node class, a doubly linked list with sentinel
# head/rear nodes, and an LRU cache (dict + list) with a class-level
# decorator factory akin to functools.lru_cache.
# Concrete defects to confirm before any reuse:
#   - all three classes are named `lowerCamelCase` (later ones shadow
#     earlier ones) and several methods reuse `lowercase__` for multiple
#     parameters, which is a SyntaxError (duplicate argument names).
#   - bodies still reference the original, now-unbound names (`key`, `val`,
#     `node`, `capacity`, ...).
#   - the text was newline-flattened: the `class lowerCamelCase` at the end
#     of the first line continues with `(Generic[T, U] ):` on the next.
# Kept byte-identical below for provenance rather than "fixed" blindly.
'''simple docstring''' from __future__ import annotations from collections.abc import Callable from typing import Generic, TypeVar UpperCAmelCase : Any = TypeVar('T') UpperCAmelCase : str = TypeVar('U') class lowerCamelCase (Generic[T, U] ): def __init__( self , lowercase__ , lowercase__ ) -> List[Any]: """simple docstring""" _snake_case : str = key _snake_case : Optional[int] = val _snake_case : DoubleLinkedListNode[T, U] | None = None _snake_case : DoubleLinkedListNode[T, U] | None = None def __repr__( self ) -> str: """simple docstring""" return ( F'''Node: key: {self.key}, val: {self.val}, ''' F'''has next: {bool(self.next )}, has prev: {bool(self.prev )}''' ) class lowerCamelCase (Generic[T, U] ): def __init__( self ) -> None: """simple docstring""" _snake_case : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(lowercase__ , lowercase__ ) _snake_case : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(lowercase__ , lowercase__ ) _snake_case , _snake_case : Union[str, Any] = self.rear, self.head def __repr__( self ) -> str: """simple docstring""" _snake_case : List[Any] = ['''DoubleLinkedList'''] _snake_case : str = self.head while node.next is not None: rep.append(str(lowercase__ ) ) _snake_case : List[str] = node.next rep.append(str(self.rear ) ) return ",\n ".join(lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ ) -> None: """simple docstring""" _snake_case : Tuple = self.rear.prev # All nodes other than self.head are guaranteed to have non-None previous assert previous is not None _snake_case : Union[str, Any] = node _snake_case : Optional[Any] = previous _snake_case : int = node _snake_case : Union[str, Any] = self.rear def UpperCAmelCase_ ( self , lowercase__ ) -> DoubleLinkedListNode[T, U] | None: """simple docstring""" if node.prev is None or node.next is None: return None _snake_case : Optional[int] = node.next _snake_case : Any = node.prev _snake_case : List[str] = None _snake_case : Optional[int] = None return node class lowerCamelCase
(Generic[T, U] ): _lowercase : dict[Callable[[T], U], LRUCache[T, U]] = {} def __init__( self , lowercase__ ) -> Union[str, Any]: """simple docstring""" _snake_case : DoubleLinkedList[T, U] = DoubleLinkedList() _snake_case : Union[str, Any] = capacity _snake_case : int = 0 _snake_case : Dict = 0 _snake_case : Union[str, Any] = 0 _snake_case : dict[T, DoubleLinkedListNode[T, U]] = {} def __repr__( self ) -> str: """simple docstring""" return ( F'''CacheInfo(hits={self.hits}, misses={self.miss}, ''' F'''capacity={self.capacity}, current size={self.num_keys})''' ) def __contains__( self , lowercase__ ) -> bool: """simple docstring""" return key in self.cache def UpperCAmelCase_ ( self , lowercase__ ) -> U | None: """simple docstring""" if key in self.cache: self.hits += 1 _snake_case : DoubleLinkedListNode[T, U] = self.cache[key] _snake_case : Tuple = self.list.remove(self.cache[key] ) assert node == value_node # node is guaranteed not None because it is in self.cache assert node is not None self.list.add(lowercase__ ) return node.val self.miss += 1 return None def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> None: """simple docstring""" if key not in self.cache: if self.num_keys >= self.capacity: # delete first node (oldest) when over capacity _snake_case : Dict = self.list.head.next # guaranteed to have a non-None first node when num_keys > 0 # explain to type checker via assertions assert first_node is not None assert first_node.key is not None assert ( self.list.remove(lowercase__ ) is not None ) # node guaranteed to be in list assert node.key is not None del self.cache[first_node.key] self.num_keys -= 1 _snake_case : Optional[int] = DoubleLinkedListNode(lowercase__ , lowercase__ ) self.list.add(self.cache[key] ) self.num_keys += 1 else: # bump node to the end of the list, update value _snake_case : Optional[Any] = self.list.remove(self.cache[key] ) assert node is not None # node guaranteed to be in list _snake_case : Optional[Any] = value
self.list.add(lowercase__ ) @classmethod def UpperCAmelCase_ ( cls , lowercase__ = 128 ) -> Callable[[Callable[[T], U]], Callable[..., U]]: """simple docstring""" def cache_decorator_inner(lowercase__ ) -> Callable[..., U]: def cache_decorator_wrapper(*lowercase__ ) -> U: if func not in cls.decorator_function_to_instance_map: _snake_case : Optional[Any] = LRUCache(lowercase__ ) _snake_case : Union[str, Any] = cls.decorator_function_to_instance_map[func].get(args[0] ) if result is None: _snake_case : Tuple = func(*lowercase__ ) cls.decorator_function_to_instance_map[func].put(args[0] , lowercase__ ) return result def cache_info() -> LRUCache[T, U]: return cls.decorator_function_to_instance_map[func] setattr(lowercase__ , '''cache_info''' , lowercase__ ) # noqa: B010 return cache_decorator_wrapper return cache_decorator_inner if __name__ == "__main__": import doctest doctest.testmod()
47
1
# NOTE(review): Byte-identical duplicate of the GPT-2 "bertology"-style
# head-masking/pruning script that also appears earlier in this file —
# evidently the same corrupted sample emitted twice by the transformation
# pipeline. Same defects apply:
#   - every function is named `_a` and every parameter `lowerCAmelCase_`;
#     duplicate parameter names make each `def` a SyntaxError in Python.
#   - bodies still reference the original, now-unbound names (`args`, `model`,
#     `tensor`, `p`, `unlogit`, `eval_dataloader`, ...).
#   - statement newlines were collapsed, so the text below does not parse
#     as-is; it is kept byte-identical for provenance rather than "fixed"
#     blindly.
'''simple docstring''' import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel UpperCAmelCase : List[str] = logging.getLogger(__name__) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" if os.path.exists(lowerCAmelCase_ ): if os.path.exists(os.path.join(lowerCAmelCase_ , '''config.json''' ) ) and os.path.isfile( os.path.join(lowerCAmelCase_ , '''config.json''' ) ): os.remove(os.path.join(lowerCAmelCase_ , '''config.json''' ) ) if os.path.exists(os.path.join(lowerCAmelCase_ , '''pytorch_model.bin''' ) ) and os.path.isfile( os.path.join(lowerCAmelCase_ , '''pytorch_model.bin''' ) ): os.remove(os.path.join(lowerCAmelCase_ , '''pytorch_model.bin''' ) ) else: os.makedirs(lowerCAmelCase_ ) model.save_pretrained(lowerCAmelCase_ ) def _a ( lowerCAmelCase_ , lowerCAmelCase_=False ): """simple docstring""" _snake_case : Optional[Any] = 2 if unlogit: _snake_case : Any = torch.pow(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Union[str, Any] = p * torch.log(lowerCAmelCase_ ) _snake_case : Optional[Any] = 0 return -plogp.sum(dim=-1 ) def _a ( lowerCAmelCase_ ): """simple docstring""" logger.info('''lv, h >\t''' + '''\t'''.join(f'''{x + 1}''' for x in range(len(lowerCAmelCase_ ) ) ) ) for row in range(len(lowerCAmelCase_ ) ): if tensor.dtype != torch.long: logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:.5f}''' for x in tensor[row].cpu().data ) ) else: logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:d}''' for x in tensor[row].cpu().data ) ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=None , lowerCAmelCase_=False ): """simple docstring""" _snake_case , _snake_case : Optional[int] = model.config.num_hidden_layers, model.config.num_attention_heads
_snake_case : Tuple = torch.zeros(lowerCAmelCase_ , lowerCAmelCase_ ).to(args.device ) _snake_case : Union[str, Any] = torch.zeros(lowerCAmelCase_ , lowerCAmelCase_ ).to(args.device ) if head_mask is None: _snake_case : int = torch.ones(lowerCAmelCase_ , lowerCAmelCase_ ).to(args.device ) head_mask.requires_grad_(requires_grad=lowerCAmelCase_ ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: _snake_case : Dict = None _snake_case : Dict = 0.0 _snake_case : Optional[int] = 0.0 for step, inputs in enumerate(tqdm(lowerCAmelCase_ , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ): _snake_case : List[Any] = tuple(t.to(args.device ) for t in inputs ) ((_snake_case) , ) : Optional[Any] = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) _snake_case : Any = model(lowerCAmelCase_ , labels=lowerCAmelCase_ , head_mask=lowerCAmelCase_ ) # (loss), lm_logits, presents, (all hidden_states), (attentions) _snake_case , _snake_case , _snake_case : List[Any] = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(lowerCAmelCase_ ): _snake_case : Union[str, Any] = entropy(attn.detach() , lowerCAmelCase_ ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(lowerCAmelCase_ ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: _snake_case : Any = 2 _snake_case : List[str] = torch.pow(torch.pow(lowerCAmelCase_ , lowerCAmelCase_ ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) +
1E-20 if not args.dont_normalize_global_importance: _snake_case : Optional[int] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info('''Attention entropies''' ) print_ad_tensor(lowerCAmelCase_ ) if compute_importance: logger.info('''Head importance scores''' ) print_ad_tensor(lowerCAmelCase_ ) logger.info('''Head ranked by importance scores''' ) _snake_case : str = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) _snake_case : List[Any] = torch.arange( head_importance.numel() , device=args.device ) _snake_case : List[Any] = head_ranks.view_as(lowerCAmelCase_ ) print_ad_tensor(lowerCAmelCase_ ) return attn_entropy, head_importance, total_loss def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case , _snake_case , _snake_case : str = compute_heads_importance(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , compute_entropy=lowerCAmelCase_ ) _snake_case : Optional[Any] = 1 / loss # instead of downsteam score use the LM loss logger.info('''Pruning: original score: %f, threshold: %f''' , lowerCAmelCase_ , original_score * args.masking_threshold ) _snake_case : int = torch.ones_like(lowerCAmelCase_ ) _snake_case : Optional[Any] = max(1 , int(new_head_mask.numel() * args.masking_amount ) ) _snake_case : int = original_score while current_score >= original_score * args.masking_threshold: _snake_case : int = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads _snake_case : Dict = float('''Inf''' ) _snake_case : Optional[Any] = head_importance.view(-1 ).sort()[1] if len(lowerCAmelCase_ ) <= num_to_mask: print('''BREAK BY num_to_mask''' ) break # mask heads _snake_case : Union[str, Any] = current_heads_to_mask[:num_to_mask] logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) ) _snake_case : Tuple = new_head_mask.view(-1 )
_snake_case : List[str] = 0.0 _snake_case : str = new_head_mask.view_as(lowerCAmelCase_ ) _snake_case : Dict = new_head_mask.clone().detach() print_ad_tensor(lowerCAmelCase_ ) # Compute metric and head importance again _snake_case , _snake_case , _snake_case : Any = compute_heads_importance( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , compute_entropy=lowerCAmelCase_ , head_mask=lowerCAmelCase_ ) _snake_case : int = 1 / loss logger.info( '''Masking: current score: %f, remaining heads %d (%.1f percents)''' , lowerCAmelCase_ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , ) logger.info('''Final head mask''' ) print_ad_tensor(lowerCAmelCase_ ) np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() ) return head_mask def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[Any] = datetime.now() _snake_case , _snake_case , _snake_case : Union[str, Any] = compute_heads_importance( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , compute_entropy=lowerCAmelCase_ , compute_importance=lowerCAmelCase_ , head_mask=lowerCAmelCase_ ) _snake_case : Tuple = 1 / loss _snake_case : Dict = datetime.now() - before_time _snake_case : List[Any] = sum(p.numel() for p in model.parameters() ) _snake_case : int = { layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(lowerCAmelCase_ ) ) } for k, v in heads_to_prune.items(): if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case : Union[str, Any] = [ v, ] assert sum(len(lowerCAmelCase_ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(lowerCAmelCase_ ) _snake_case : List[str] = sum(p.numel() for p in model.parameters() ) _snake_case : int = datetime.now() _snake_case , _snake_case , _snake_case : Optional[Any] = compute_heads_importance( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ,
compute_entropy=lowerCAmelCase_ , compute_importance=lowerCAmelCase_ , head_mask=lowerCAmelCase_ , actually_pruned=lowerCAmelCase_ , ) _snake_case : Optional[int] = 1 / loss _snake_case : Dict = datetime.now() - before_time logger.info( '''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' , lowerCAmelCase_ , lowerCAmelCase_ , pruned_num_params / original_num_params * 100 , ) logger.info('''Pruning: score with masking: %f score with pruning: %f''' , lowerCAmelCase_ , lowerCAmelCase_ ) logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 100 ) save_model(lowerCAmelCase_ , args.output_dir ) def _a ( ): """simple docstring""" _snake_case : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--data_dir''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''' , ) parser.add_argument( '''--model_name_or_path''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--output_dir''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''The output directory where the model predictions and checkpoints will be written.''' , ) # Other parameters parser.add_argument( '''--config_name''' , default='''''' , type=lowerCAmelCase_ , help='''Pretrained config name or path if not the same as model_name_or_path''' , ) parser.add_argument( '''--tokenizer_name''' , default='''''' , type=lowerCAmelCase_ , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , ) parser.add_argument( '''--cache_dir''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , help='''Where do you want to store the pre-trained models downloaded from s3''' , ) parser.add_argument(
'''--data_subset''' , type=lowerCAmelCase_ , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' ) parser.add_argument( '''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' ) parser.add_argument( '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' ) parser.add_argument( '''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' ) parser.add_argument( '''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , ) parser.add_argument( '''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' ) parser.add_argument( '''--masking_threshold''' , default=0.9 , type=lowerCAmelCase_ , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , ) parser.add_argument( '''--masking_amount''' , default=0.1 , type=lowerCAmelCase_ , help='''Amount to heads to masking at each masking step.''' ) parser.add_argument('''--metric_name''' , default='''acc''' , type=lowerCAmelCase_ , help='''Metric to use for head masking.''' ) parser.add_argument( '''--max_seq_length''' , default=128 , type=lowerCAmelCase_ , help=( '''The maximum total input sequence length after WordPiece tokenization.
\n''' '''Sequences longer than this will be truncated, sequences shorter padded.''' ) , ) parser.add_argument('''--batch_size''' , default=1 , type=lowerCAmelCase_ , help='''Batch size.''' ) parser.add_argument('''--seed''' , type=lowerCAmelCase_ , default=42 ) parser.add_argument('''--local_rank''' , type=lowerCAmelCase_ , default=-1 , help='''local_rank for distributed training on gpus''' ) parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' ) parser.add_argument('''--server_ip''' , type=lowerCAmelCase_ , default='''''' , help='''Can be used for distant debugging.''' ) parser.add_argument('''--server_port''' , type=lowerCAmelCase_ , default='''''' , help='''Can be used for distant debugging.''' ) _snake_case : Optional[Any] = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('''Waiting for debugger attach''' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=lowerCAmelCase_ ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: _snake_case : str = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' ) _snake_case : Optional[Any] = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) _snake_case : List[str] = torch.device('''cuda''' , args.local_rank ) _snake_case : int = 1 torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) _snake_case : Optional[Any] = GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel
training model.to(args.device ) if args.local_rank != -1: _snake_case : Optional[int] = nn.parallel.DistributedDataParallel( lowerCAmelCase_ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=lowerCAmelCase_ ) elif args.n_gpu > 1: _snake_case : List[Any] = nn.DataParallel(lowerCAmelCase_ ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=lowerCAmelCase_ ) torch.save(lowerCAmelCase_ , os.path.join(args.output_dir , '''run_args.bin''' ) ) logger.info('''Training/evaluation parameters %s''' , lowerCAmelCase_ ) # Prepare dataset _snake_case : Dict = np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) _snake_case : int = (torch.from_numpy(lowerCAmelCase_ ),) _snake_case : Tuple = TensorDataset(*lowerCAmelCase_ ) _snake_case : List[str] = RandomSampler(lowerCAmelCase_ ) _snake_case : Dict = DataLoader(lowerCAmelCase_ , sampler=lowerCAmelCase_ , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: _snake_case : Optional[int] = mask_heads(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) prune_heads(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) if __name__ == "__main__": main()
47
'''simple docstring'''
import os

import numpy
import onnx


def _is_equal_tensor_proto(tensor_a, tensor_b):
    """Return True when two TensorProtos are equal, ignoring their names.

    The names are temporarily blanked so protobuf equality compares only the
    payload (dims, data_type, raw data), then restored before returning.
    """
    name_a = tensor_a.name
    name_b = tensor_b.name
    tensor_a.name = ''
    tensor_b.name = ''
    res = tensor_a == tensor_b  # protobuf value equality with names cleared
    tensor_a.name = name_a
    tensor_b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    """Rewrite every input of ``node_proto`` called ``name`` to ``new_name``.

    Recurses into the subgraphs of ``If`` nodes (then/else branches) and
    ``Loop`` nodes (body) so nested references are rewritten as well.
    """
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            # insert-then-pop keeps the input at the same position
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    """Apply :func:`_node_replace_input_with` to every node in ``graph_proto``."""
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    """Remove duplicate initializers and repoint node inputs to the kept copy.

    ``ind_to_replace`` holds ``(i, ref_i)`` pairs where initializer ``i`` is a
    duplicate of the earlier initializer ``ref_i`` (so ``i > ref_i``). The
    duplicate is removed from ``model_without_ext`` and every node input that
    referenced it is rewritten to the retained initializer's name.
    """
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def _a(model_path):
    """Deduplicate identical initializers of the ONNX model at ``model_path``.

    Pairwise-compares all graph initializers (names ignored), removes the
    later duplicate of each equal pair, rewrites node inputs to the kept
    tensor, reports the memory saved, saves the optimized model next to the
    original as ``optimized_<basename>`` and returns that new path.
    """
    model_base_name = os.path.dirname(model_path)
    model_file_name = os.path.basename(model_path)
    model = onnx.load(os.path.join(model_base_name, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                # byte sizes per TensorProto.DataType: FLOAT(1)/INT32(6) -> 4,
                # INT64(7)/DOUBLE(11) -> 8
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print('unexpected data type: ', dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                # record (duplicate index, kept index); sorted before use
                ind_to_replace.append((j, i))
    print('total reduced size: ', total_reduced_size / 1024 / 1024 / 1024, 'GB')
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    new_name = 'optimized_' + model_file_name
    new_model_path = os.path.join(model_base_name, new_name)
    onnx.save(model, new_model_path)
    return new_model_path
47
1
'''simple docstring'''
# NOTE(review): this chunk carries heavy mechanical-rename artifacts: every method
# is named `UpperCAmelCase_` (later defs shadow earlier ones although callers use
# `self.normalize` / `self.to_torch` / `self.reset_xa` / `self.run_diffusion` /
# `self.de_normalize`), parameters are all `lowercase__` (duplicate parameter
# names), and `self.<attr> = ...` assignments appear as `_snake_case : T = ...`,
# so names the bodies read (`value_function`, `self.data`, `batch_size`, ...) are
# never actually bound. Comments below document the intended behaviour as far as
# the visible code shows it; the code itself is left untouched (doc-only pass).
import numpy as np
import torch
import tqdm

from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler


class lowerCamelCase (a__ ):
    # Value-guided sampling pipeline: a value network steers a diffusion model's
    # trajectory samples toward high-value plans for an RL environment.

    def __init__( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ) -> List[str]:
        """Store the value function, diffusion UNet, scheduler and env; precompute
        per-key mean/std of the env dataset for (de)normalisation, plus the
        observation/action dimensionalities."""
        super().__init__()
        _snake_case : int = value_function
        _snake_case : Dict = unet
        _snake_case : List[Any] = scheduler
        _snake_case : List[str] = env
        _snake_case : Dict = env.get_dataset()
        _snake_case : List[str] = {}
        for key in self.data.keys():
            try:
                _snake_case : List[Any] = self.data[key].mean()
            except: # noqa: E722
                # non-numeric dataset entries are simply skipped
                pass
        _snake_case : Union[str, Any] = {}
        for key in self.data.keys():
            try:
                _snake_case : int = self.data[key].std()
            except: # noqa: E722
                pass
        _snake_case : Dict = env.observation_space.shape[0]
        _snake_case : Union[str, Any] = env.action_space.shape[0]

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Dict:
        """Normalise ``x_in`` with the stored mean/std for ``key``."""
        return (x_in - self.means[key]) / self.stds[key]

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Optional[int]:
        """Invert the normalisation for ``key``."""
        return x_in * self.stds[key] + self.means[key]

    def UpperCAmelCase_ ( self , lowercase__ ) -> Optional[Any]:
        """Recursively move ``x_in`` (dict / tensor / array-like) onto the UNet's device."""
        if type(lowercase__ ) is dict:
            return {k: self.to_torch(lowercase__ ) for k, v in x_in.items()}
        elif torch.is_tensor(lowercase__ ):
            return x_in.to(self.unet.device )
        return torch.tensor(lowercase__ , device=self.unet.device )

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ ) -> Dict:
        """Overwrite conditioned timesteps of the trajectory with ``cond`` values
        (e.g. pin the initial state)."""
        for key, val in cond.items():
            _snake_case : Dict = val.clone()
        return x_in

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Dict:
        """Run the value-guided reverse-diffusion loop; returns (trajectories, values)."""
        _snake_case : List[Any] = x.shape[0]
        _snake_case : Tuple = None
        for i in tqdm.tqdm(self.scheduler.timesteps ):
            # create batch of timesteps to pass into model
            _snake_case : Optional[Any] = torch.full((batch_size,) , lowercase__ , device=self.unet.device , dtype=torch.long )
            for _ in range(lowercase__ ):
                with torch.enable_grad():
                    x.requires_grad_()
                    # permute to match dimension for pre-trained models
                    _snake_case : Union[str, Any] = self.value_function(x.permute(0 , 2 , 1 ) , lowercase__ ).sample
                    _snake_case : int = torch.autograd.grad([y.sum()] , [x] )[0]
                    # guidance step scaled by the scheduler's posterior std at this timestep
                    _snake_case : Dict = self.scheduler._get_variance(lowercase__ )
                    _snake_case : Dict = torch.exp(0.5 * posterior_variance )
                    _snake_case : Union[str, Any] = model_std * grad
                _snake_case : str = 0
                _snake_case : Tuple = x.detach()
                _snake_case : Dict = x + scale * grad
                _snake_case : Optional[int] = self.reset_xa(lowercase__ , lowercase__ , self.action_dim )
            _snake_case : Any = self.unet(x.permute(0 , 2 , 1 ) , lowercase__ ).sample.permute(0 , 2 , 1 )
            # TODO: verify deprecation of this kwarg
            _snake_case : Tuple = self.scheduler.step(lowercase__ , lowercase__ , lowercase__ , predict_epsilon=lowercase__ )['''prev_sample''']
            # apply conditions to the trajectory (set the initial state)
            _snake_case : int = self.reset_xa(lowercase__ , lowercase__ , self.action_dim )
            _snake_case : Optional[int] = self.to_torch(lowercase__ )
        return x, y

    def __call__( self , lowercase__ , lowercase__=64 , lowercase__=32 , lowercase__=2 , lowercase__=0.1 ) -> Any:
        """Plan from an observation: sample guided trajectories, sort them by value
        and return the de-normalised action sequence of the best one."""
        _snake_case : int = self.normalize(lowercase__ , '''observations''' )
        _snake_case : Dict = obs[None].repeat(lowercase__ , axis=0 )
        _snake_case : int = {0: self.to_torch(lowercase__ )}
        _snake_case : List[str] = (batch_size, planning_horizon, self.state_dim + self.action_dim)
        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        _snake_case : Any = randn_tensor(lowercase__ , device=self.unet.device )
        _snake_case : Union[str, Any] = self.reset_xa(lowercase__ , lowercase__ , self.action_dim )
        _snake_case : Any = self.to_torch(lowercase__ )
        # run the diffusion process
        _snake_case , _snake_case : List[Any] = self.run_diffusion(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
        # sort output trajectories by value
        _snake_case : Any = y.argsort(0 , descending=lowercase__ ).squeeze()
        _snake_case : int = x[sorted_idx]
        _snake_case : Optional[Any] = sorted_values[:, :, : self.action_dim]
        _snake_case : List[str] = actions.detach().cpu().numpy()
        _snake_case : Tuple = self.de_normalize(lowercase__ , key='''actions''' )
        # select the action with the highest value
        if y is not None:
            _snake_case : Any = 0
        else:
            # if we didn't run value guiding, select a random action
            _snake_case : str = np.random.randint(0 , lowercase__ )
        _snake_case : int = denorm_actions[selected_index, 0]
        return denorm_actions
47
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Lazy-import structure: maps each submodule to the public names it provides.
# Fix: this dict was previously assigned to an unrelated name while the
# `_LazyModule(...)` call below referenced the undefined `_import_structure`,
# and the torch branch overwrote the dict instead of adding the modeling entry.
_import_structure = {
    'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: expose only the configuration objects
    pass
else:
    _import_structure['modeling_pegasus_x'] = [
        'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PegasusXForConditionalGeneration',
        'PegasusXModel',
        'PegasusXPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static imports so type checkers see the real symbols.
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )

else:
    import sys

    # At runtime the module is served lazily through _LazyModule.
    UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
47
1
'''simple docstring'''
# NOTE(review): mechanical-rename artifacts throughout this chunk — all three
# config classes are named `lowerCamelCase`, methods are all `UpperCAmelCase_`
# (later defs shadow earlier ones), parameters are duplicated `lowercase__`
# names, and attribute assignments appear as `_snake_case : T = ...` while the
# bodies read the original names (`vocab_size`, `config_dict`, `text_config`,
# ...). References to `OwlViTTextConfig` / `OwlViTVisionConfig` are never
# defined under those names here. Documented as-is; code left byte-for-byte.
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union

if TYPE_CHECKING:
    from ...processing_utils import ProcessorMixin
    from ...utils import TensorType

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)

# model-id -> hosted config.json URL for the pretrained OwlViT checkpoints
UpperCAmelCase : List[str] = {
    'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json',
    'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json',
    'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json',
}


class lowerCamelCase (a__ ):
    # Text-encoder configuration ("owlvit_text_model").
    _lowercase : List[str] = """owlvit_text_model"""

    def __init__( self , lowercase__=49_408 , lowercase__=512 , lowercase__=2_048 , lowercase__=12 , lowercase__=8 , lowercase__=16 , lowercase__="quick_gelu" , lowercase__=1E-5 , lowercase__=0.0 , lowercase__=0.02 , lowercase__=1.0 , lowercase__=0 , lowercase__=49_406 , lowercase__=49_407 , **lowercase__ , ) -> Optional[int]:
        """Store the text-encoder hyperparameters (vocab/hidden/intermediate
        sizes, layer and head counts, activation, norm eps, dropout and init
        scales); token ids are forwarded to the base config."""
        super().__init__(pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , **lowercase__ )
        _snake_case : List[Any] = vocab_size
        _snake_case : List[Any] = hidden_size
        _snake_case : int = intermediate_size
        _snake_case : str = num_hidden_layers
        _snake_case : Tuple = num_attention_heads
        _snake_case : Optional[int] = max_position_embeddings
        _snake_case : Union[str, Any] = hidden_act
        _snake_case : Dict = layer_norm_eps
        _snake_case : Optional[Any] = attention_dropout
        _snake_case : Dict = initializer_range
        _snake_case : Optional[Any] = initializer_factor

    @classmethod
    def UpperCAmelCase_ ( cls , lowercase__ , **lowercase__ ) -> "PretrainedConfig":
        """Load this config from a checkpoint, unwrapping the `text_config`
        sub-dict when the checkpoint is a composite `owlvit` config."""
        cls._set_token_in_kwargs(lowercase__ )
        _snake_case , _snake_case : int = cls.get_config_dict(lowercase__ , **lowercase__ )
        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get('''model_type''' ) == "owlvit":
            _snake_case : str = config_dict['''text_config''']
        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(lowercase__ , **lowercase__ )


class lowerCamelCase (a__ ):
    # Vision-encoder configuration ("owlvit_vision_model").
    _lowercase : List[Any] = """owlvit_vision_model"""

    def __init__( self , lowercase__=768 , lowercase__=3_072 , lowercase__=12 , lowercase__=12 , lowercase__=3 , lowercase__=768 , lowercase__=32 , lowercase__="quick_gelu" , lowercase__=1E-5 , lowercase__=0.0 , lowercase__=0.02 , lowercase__=1.0 , **lowercase__ , ) -> Tuple:
        """Store the vision-encoder hyperparameters (sizes, layer/head counts,
        channels, image/patch size, activation, norm eps, dropout, init scales)."""
        super().__init__(**lowercase__ )
        _snake_case : Union[str, Any] = hidden_size
        _snake_case : Dict = intermediate_size
        _snake_case : Any = num_hidden_layers
        _snake_case : Optional[int] = num_attention_heads
        _snake_case : Optional[Any] = num_channels
        _snake_case : List[Any] = image_size
        _snake_case : Dict = patch_size
        _snake_case : Optional[int] = hidden_act
        _snake_case : str = layer_norm_eps
        _snake_case : Any = attention_dropout
        _snake_case : Optional[Any] = initializer_range
        _snake_case : Tuple = initializer_factor

    @classmethod
    def UpperCAmelCase_ ( cls , lowercase__ , **lowercase__ ) -> "PretrainedConfig":
        """Load this config from a checkpoint, unwrapping the `vision_config`
        sub-dict when the checkpoint is a composite `owlvit` config."""
        cls._set_token_in_kwargs(lowercase__ )
        _snake_case , _snake_case : Any = cls.get_config_dict(lowercase__ , **lowercase__ )
        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get('''model_type''' ) == "owlvit":
            _snake_case : str = config_dict['''vision_config''']
        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(lowercase__ , **lowercase__ )


class lowerCamelCase (a__ ):
    # Composite OwlViT configuration bundling text and vision sub-configs.
    _lowercase : Dict = """owlvit"""
    _lowercase : Dict = True

    def __init__( self , lowercase__=None , lowercase__=None , lowercase__=512 , lowercase__=2.6_592 , lowercase__=True , **lowercase__ , ) -> List[str]:
        """Build the text/vision sub-configs (defaulting missing dicts to empty)
        plus projection dim, logit-scale init and return_dict settings."""
        super().__init__(**lowercase__ )
        if text_config is None:
            _snake_case : Dict = {}
            logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''' )
        if vision_config is None:
            _snake_case : List[str] = {}
            logger.info('''vision_config is None. initializing the OwlViTVisionConfig with default values.''' )
        _snake_case : Union[str, Any] = OwlViTTextConfig(**lowercase__ )
        _snake_case : Dict = OwlViTVisionConfig(**lowercase__ )
        _snake_case : str = projection_dim
        _snake_case : Optional[Any] = logit_scale_init_value
        _snake_case : int = return_dict
        _snake_case : Union[str, Any] = 1.0

    @classmethod
    def UpperCAmelCase_ ( cls , lowercase__ , **lowercase__ ) -> "PretrainedConfig":
        """Load the composite config from a pretrained checkpoint."""
        cls._set_token_in_kwargs(lowercase__ )
        _snake_case , _snake_case : Optional[int] = cls.get_config_dict(lowercase__ , **lowercase__ )
        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(lowercase__ , **lowercase__ )

    @classmethod
    def UpperCAmelCase_ ( cls , lowercase__ , lowercase__ , **lowercase__ ) -> Union[str, Any]:
        """Build a composite config from separate text and vision config dicts."""
        _snake_case : List[Any] = {}
        _snake_case : List[str] = text_config
        _snake_case : Optional[int] = vision_config
        return cls.from_dict(lowercase__ , **lowercase__ )

    def UpperCAmelCase_ ( self ) -> Tuple:
        """Serialise to a plain dict, expanding sub-configs and adding model_type."""
        _snake_case : int = copy.deepcopy(self.__dict__ )
        _snake_case : Tuple = self.text_config.to_dict()
        _snake_case : str = self.vision_config.to_dict()
        _snake_case : List[str] = self.__class__.model_type
        return output


class lowerCamelCase (a__ ):
    # ONNX export description for OwlViT: named input/output axes, validation
    # tolerance, dummy-input generation and the minimum opset.
    @property
    def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
        """Dynamic axes of the ONNX inputs: input_ids, pixel_values, attention_mask."""
        return OrderedDict(
            [
                ('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
                ('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
            ] )

    @property
    def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
        """Dynamic axes of the ONNX outputs (similarity logits and embeddings)."""
        return OrderedDict(
            [
                ('''logits_per_image''', {0: '''batch'''}),
                ('''logits_per_text''', {0: '''batch'''}),
                ('''text_embeds''', {0: '''batch'''}),
                ('''image_embeds''', {0: '''batch'''}),
            ] )

    @property
    def UpperCAmelCase_ ( self ) -> float:
        """Absolute tolerance used when validating the exported model."""
        return 1E-4

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ = -1 , lowercase__ = -1 , lowercase__ = None , ) -> Mapping[str, Any]:
        """Generate dummy text and image inputs via the processor's tokenizer
        and image processor, merged into one feed dict."""
        _snake_case : str = super().generate_dummy_inputs(
            processor.tokenizer , batch_size=lowercase__ , seq_length=lowercase__ , framework=lowercase__ )
        _snake_case : int = super().generate_dummy_inputs(
            processor.image_processor , batch_size=lowercase__ , framework=lowercase__ )
        return {**text_input_dict, **image_input_dict}

    @property
    def UpperCAmelCase_ ( self ) -> int:
        """Default (minimum) ONNX opset required for export."""
        return 14
47
'''simple docstring'''
# NOTE(review): mechanical-rename artifacts — duplicate `lowercase__` parameter
# names and `_snake_case : T = ...` where `self.<attr> = ...` / named locals are
# intended (bodies read `do_resize`, `height`, `images`, ...). Doc-only pass.
from typing import List, Optional, Union

import numpy as np
import PIL.Image

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    PILImageResampling,
    get_image_size,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging

UpperCAmelCase : Dict = logging.get_logger(__name__)


class lowerCamelCase (a__ ):
    # Image processor that resizes height/width down to multiples of
    # `size_divisor` and rescales pixel values by 1/255.
    _lowercase : int = ["""pixel_values"""]

    def __init__( self , lowercase__ = True , lowercase__ = 32 , lowercase__=PILImageResampling.BILINEAR , lowercase__ = True , **lowercase__ , ) -> None:
        """Store the default resize/rescale flags, size divisor and resampling filter."""
        _snake_case : Any = do_resize
        _snake_case : List[str] = do_rescale
        _snake_case : Any = size_divisor
        _snake_case : Optional[Any] = resample
        super().__init__(**lowercase__ )

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ ) -> np.ndarray:
        """Resize so both height and width become multiples of `size_divisor`
        (each rounded down)."""
        _snake_case , _snake_case : Dict = get_image_size(lowercase__ )
        # Rounds the height and width down to the closest multiple of size_divisor
        _snake_case : Optional[int] = height // size_divisor * size_divisor
        _snake_case : Dict = width // size_divisor * size_divisor
        _snake_case : str = resize(lowercase__ , (new_h, new_w) , resample=lowercase__ , data_format=lowercase__ , **lowercase__ )
        return image

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ ) -> np.ndarray:
        """Multiply pixel values by `scale` (delegates to the shared transform)."""
        return rescale(image=lowercase__ , scale=lowercase__ , data_format=lowercase__ , **lowercase__ )

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__=None , lowercase__ = None , lowercase__ = None , lowercase__ = ChannelDimension.FIRST , **lowercase__ , ) -> BatchFeature:
        """Full preprocessing pipeline: validate, convert to numpy, optional
        divisor-resize, optional 1/255 rescale, channel-format conversion;
        returns a BatchFeature holding `pixel_values`."""
        _snake_case : Any = do_resize if do_resize is not None else self.do_resize
        _snake_case : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
        _snake_case : List[str] = size_divisor if size_divisor is not None else self.size_divisor
        _snake_case : int = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError('''size_divisor is required for resizing''' )
        _snake_case : Tuple = make_list_of_images(lowercase__ )
        if not valid_images(lowercase__ ):
            raise ValueError('''Invalid image(s)''' )
        # All transformations expect numpy arrays.
        _snake_case : Tuple = [to_numpy_array(lowercase__ ) for img in images]
        if do_resize:
            _snake_case : Optional[int] = [self.resize(lowercase__ , size_divisor=lowercase__ , resample=lowercase__ ) for image in images]
        if do_rescale:
            _snake_case : Union[str, Any] = [self.rescale(lowercase__ , scale=1 / 255 ) for image in images]
        _snake_case : Union[str, Any] = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images]
        _snake_case : List[str] = {'''pixel_values''': images}
        return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
47
1
'''simple docstring'''
# NOTE(review): mechanical-rename artifacts — the test class is `lowerCamelCase`,
# every method is `UpperCAmelCase_` (later defs shadow earlier ones; unittest
# would not discover these as tests), and locals are obscured as `_snake_case`
# while bodies read the original names (`vocab_tokens`, `tokenizer`, ...).
# Doc-only pass; code left byte-for-byte.
import json
import os
import shutil
import tempfile
from unittest import TestCase

from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available

if is_torch_available() and is_datasets_available() and is_faiss_available():
    from transformers.models.rag.configuration_rag import RagConfig
    from transformers.models.rag.tokenization_rag import RagTokenizer


@require_faiss
@require_torch
class lowerCamelCase (a__ ):
    def UpperCAmelCase_ ( self ) -> str:
        """Create a temp dir with minimal on-disk DPR (WordPiece) and BART (BPE)
        tokenizer fixtures for the tests below."""
        _snake_case : Union[str, Any] = tempfile.mkdtemp()
        _snake_case : Union[str, Any] = 8
        # DPR tok
        _snake_case : Dict = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        _snake_case : List[Any] = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
        os.makedirs(lowercase__ , exist_ok=lowercase__ )
        _snake_case : Union[str, Any] = os.path.join(lowercase__ , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        # BART tok
        _snake_case : Optional[int] = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''\u0120''',
            '''\u0120l''',
            '''\u0120n''',
            '''\u0120lo''',
            '''\u0120low''',
            '''er''',
            '''\u0120lowest''',
            '''\u0120newer''',
            '''\u0120wider''',
            '''<unk>''',
        ]
        _snake_case : str = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
        _snake_case : Optional[int] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        _snake_case : str = {'''unk_token''': '''<unk>'''}
        _snake_case : Union[str, Any] = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
        os.makedirs(lowercase__ , exist_ok=lowercase__ )
        _snake_case : Dict = os.path.join(lowercase__ , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
        _snake_case : List[str] = os.path.join(lowercase__ , BART_VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(lowercase__ ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(lowercase__ ) )

    def UpperCAmelCase_ ( self ) -> DPRQuestionEncoderTokenizer:
        """Load the DPR question-encoder tokenizer from the fixture dir."""
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )

    def UpperCAmelCase_ ( self ) -> BartTokenizer:
        """Load the BART tokenizer from the fixture dir."""
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )

    def UpperCAmelCase_ ( self ) -> List[Any]:
        """Remove the temporary fixture directory."""
        shutil.rmtree(self.tmpdirname )

    @require_tokenizers
    def UpperCAmelCase_ ( self ) -> Any:
        """Round-trip a RagTokenizer through save_pretrained/from_pretrained and
        check both sub-tokenizers and their vocabs survive."""
        _snake_case : List[str] = os.path.join(self.tmpdirname , '''rag_tokenizer''' )
        _snake_case : Optional[Any] = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
        _snake_case : int = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
        rag_config.save_pretrained(lowercase__ )
        rag_tokenizer.save_pretrained(lowercase__ )
        _snake_case : Optional[Any] = RagTokenizer.from_pretrained(lowercase__ , config=lowercase__ )
        self.assertIsInstance(new_rag_tokenizer.question_encoder , lowercase__ )
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
        self.assertIsInstance(new_rag_tokenizer.generator , lowercase__ )
        self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )

    @slow
    def UpperCAmelCase_ ( self ) -> List[str]:
        """Smoke-test tokenization of natural-questions strings with the hosted
        rag-token-nq tokenizer."""
        _snake_case : Union[str, Any] = RagTokenizer.from_pretrained('''facebook/rag-token-nq''' )
        _snake_case : int = [
            '''who got the first nobel prize in physics''',
            '''when is the next deadpool movie being released''',
            '''which mode is used for short wave broadcast service''',
            '''who is the owner of reading football club''',
            '''when is the next scandal episode coming out''',
            '''when is the last time the philadelphia won the superbowl''',
            '''what is the most current adobe flash player version''',
            '''how many episodes are there in dragon ball z''',
            '''what is the first step in the evolution of the eye''',
            '''where is gall bladder situated in human body''',
            '''what is the main mineral in lithium batteries''',
            '''who is the president of usa right now''',
            '''where do the greasers live in the outsiders''',
            '''panda is a national animal of which country''',
            '''what is the name of manchester united stadium''',
        ]
        _snake_case : Optional[Any] = tokenizer(lowercase__ )
        self.assertIsNotNone(lowercase__ )

    @slow
    def UpperCAmelCase_ ( self ) -> str:
        """Same smoke test against the hosted rag-sequence-nq tokenizer."""
        _snake_case : Optional[Any] = RagTokenizer.from_pretrained('''facebook/rag-sequence-nq''' )
        _snake_case : str = [
            '''who got the first nobel prize in physics''',
            '''when is the next deadpool movie being released''',
            '''which mode is used for short wave broadcast service''',
            '''who is the owner of reading football club''',
            '''when is the next scandal episode coming out''',
            '''when is the last time the philadelphia won the superbowl''',
            '''what is the most current adobe flash player version''',
            '''how many episodes are there in dragon ball z''',
            '''what is the first step in the evolution of the eye''',
            '''where is gall bladder situated in human body''',
            '''what is the main mineral in lithium batteries''',
            '''who is the president of usa right now''',
            '''where do the greasers live in the outsiders''',
            '''panda is a national animal of which country''',
            '''what is the name of manchester united stadium''',
        ]
        _snake_case : Any = tokenizer(lowercase__ )
        self.assertIsNotNone(lowercase__ )
47
'''simple docstring''' from __future__ import annotations import unittest from transformers import LEDConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFLEDForConditionalGeneration, TFLEDModel @require_tf class lowerCamelCase : _lowercase : Any = LEDConfig _lowercase : Any = {} _lowercase : Optional[Any] = """gelu""" def __init__( self , lowercase__ , lowercase__=13 , lowercase__=7 , lowercase__=True , lowercase__=False , lowercase__=99 , lowercase__=32 , lowercase__=2 , lowercase__=4 , lowercase__=37 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=20 , lowercase__=2 , lowercase__=1 , lowercase__=0 , lowercase__=4 , ) -> Any: """simple docstring""" _snake_case : Dict = parent _snake_case : Any = batch_size _snake_case : List[str] = seq_length _snake_case : Union[str, Any] = is_training _snake_case : Tuple = use_labels _snake_case : int = vocab_size _snake_case : str = hidden_size _snake_case : Optional[Any] = num_hidden_layers _snake_case : List[Any] = num_attention_heads _snake_case : Optional[int] = intermediate_size _snake_case : List[Any] = hidden_dropout_prob _snake_case : List[str] = attention_probs_dropout_prob _snake_case : Optional[int] = max_position_embeddings _snake_case : Any = eos_token_id _snake_case : List[Any] = pad_token_id _snake_case : Optional[int] = bos_token_id _snake_case : Any = attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window` and one before and one 
after _snake_case : Any = self.attention_window + 2 # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for # the `test_attention_outputs` and `test_hidden_states_output` tests _snake_case : Tuple = ( self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window ) def UpperCAmelCase_ ( self ) -> Optional[int]: """simple docstring""" _snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _snake_case : Tuple = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _snake_case : Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1 ) _snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _snake_case : List[Any] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , ) _snake_case : Dict = prepare_led_inputs_dict(lowercase__ , lowercase__ , lowercase__ ) _snake_case : Dict = tf.concat( [tf.zeros_like(lowercase__ )[:, :-1], tf.ones_like(lowercase__ )[:, -1:]] , axis=-1 , ) _snake_case : Dict = global_attention_mask return config, inputs_dict def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> int: """simple docstring""" _snake_case : int = TFLEDModel(config=lowercase__ ).get_decoder() _snake_case : Union[str, Any] = inputs_dict['''input_ids'''] _snake_case : List[str] 
= input_ids[:1, :] _snake_case : Tuple = inputs_dict['''attention_mask'''][:1, :] _snake_case : Dict = 1 # first forward pass _snake_case : Optional[int] = model(lowercase__ , attention_mask=lowercase__ , use_cache=lowercase__ ) _snake_case , _snake_case : Dict = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _snake_case : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size ) _snake_case : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _snake_case : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 ) _snake_case : List[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) _snake_case : List[Any] = model(lowercase__ , attention_mask=lowercase__ )[0] _snake_case : Tuple = model(lowercase__ , attention_mask=lowercase__ , past_key_values=lowercase__ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _snake_case : Tuple = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _snake_case : int = output_from_no_past[:, -3:, random_slice_idx] _snake_case : Optional[Any] = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowercase__ , lowercase__ , rtol=1E-3 ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , ): """simple docstring""" if attention_mask is None: _snake_case : Union[str, Any] = tf.cast(tf.math.not_equal(lowerCAmelCase_ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: _snake_case : str = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: _snake_case : int = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _snake_case : List[str] = 
tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf class lowerCamelCase (a__ , a__ , unittest.TestCase ): _lowercase : Optional[int] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else () _lowercase : int = (TFLEDForConditionalGeneration,) if is_tf_available() else () _lowercase : Dict = ( { """conversational""": TFLEDForConditionalGeneration, """feature-extraction""": TFLEDModel, """summarization""": TFLEDForConditionalGeneration, """text2text-generation""": TFLEDForConditionalGeneration, """translation""": TFLEDForConditionalGeneration, } if is_tf_available() else {} ) _lowercase : int = True _lowercase : List[Any] = False _lowercase : str = False _lowercase : Union[str, Any] = False def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" _snake_case : str = TFLEDModelTester(self ) _snake_case : Union[str, Any] = ConfigTester(self , config_class=lowercase__ ) def UpperCAmelCase_ ( self ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase_ ( self ) -> List[str]: """simple docstring""" _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowercase__ ) def UpperCAmelCase_ ( self ) -> int: """simple docstring""" _snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() _snake_case : Any = tf.zeros_like(inputs_dict['''attention_mask'''] ) _snake_case : Optional[Any] = 2 _snake_case : Any = tf.where( tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , ) _snake_case : Dict = True _snake_case : str = self.model_tester.seq_length _snake_case : Dict = 
self.model_tester.encoder_seq_length def check_decoder_attentions_output(lowercase__ ): _snake_case : Optional[int] = outputs.decoder_attentions self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) def check_encoder_attentions_output(lowercase__ ): _snake_case : int = [t.numpy() for t in outputs.encoder_attentions] _snake_case : Tuple = [t.numpy() for t in outputs.encoder_global_attentions] self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers ) self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) self.assertListEqual( list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , ) for model_class in self.all_model_classes: _snake_case : Union[str, Any] = True _snake_case : Dict = False _snake_case : Union[str, Any] = False _snake_case : List[Any] = model_class(lowercase__ ) _snake_case : Optional[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) ) _snake_case : List[Any] = len(lowercase__ ) self.assertEqual(config.output_hidden_states , lowercase__ ) check_encoder_attentions_output(lowercase__ ) if self.is_encoder_decoder: _snake_case : Union[str, Any] = model_class(lowercase__ ) _snake_case : List[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) ) self.assertEqual(config.output_hidden_states , lowercase__ ) check_decoder_attentions_output(lowercase__ ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] _snake_case : str = True _snake_case : Tuple = model_class(lowercase__ ) _snake_case : int = model(self._prepare_for_class(lowercase__ , lowercase__ ) ) self.assertEqual(config.output_hidden_states , 
lowercase__ ) check_encoder_attentions_output(lowercase__ ) # Check attention is always last and order is fine _snake_case : int = True _snake_case : List[str] = True _snake_case : Tuple = model_class(lowercase__ ) _snake_case : Optional[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase__ ) ) self.assertEqual(model.config.output_hidden_states , lowercase__ ) check_encoder_attentions_output(lowercase__ ) @unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' ) def UpperCAmelCase_ ( self ) -> int: """simple docstring""" pass def UpperCAmelCase_ ( self ) -> str: """simple docstring""" pass def _a ( lowerCAmelCase_ ): """simple docstring""" return tf.constant(lowerCAmelCase_ , dtype=tf.intaa ) UpperCAmelCase : Dict = 1E-4 @slow @require_tf class lowerCamelCase (unittest.TestCase ): def UpperCAmelCase_ ( self ) -> Dict: """simple docstring""" _snake_case : List[str] = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led # change to intended input here _snake_case : List[str] = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _snake_case : Tuple = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _snake_case : Tuple = prepare_led_inputs_dict(model.config , lowercase__ , lowercase__ ) _snake_case : int = model(**lowercase__ )[0] _snake_case : Dict = (1, 1_024, 768) self.assertEqual(output.shape , lowercase__ ) # change to expected output here _snake_case : List[Any] = tf.convert_to_tensor( [[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , ) tf.debugging.assert_near(output[:, :3, :3] , lowercase__ , atol=1E-3 ) def UpperCAmelCase_ ( self ) -> List[Any]: """simple docstring""" _snake_case : Any = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ) # change to intended input here _snake_case : Dict 
= _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _snake_case : Dict = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _snake_case : List[str] = prepare_led_inputs_dict(model.config , lowercase__ , lowercase__ ) _snake_case : Tuple = model(**lowercase__ )[0] _snake_case : Any = (1, 1_024, model.config.vocab_size) self.assertEqual(output.shape , lowercase__ ) # change to expected output here _snake_case : Dict = tf.convert_to_tensor( [[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , ) tf.debugging.assert_near(output[:, :3, :3] , lowercase__ , atol=1E-3 , rtol=1E-3 )
47
1
"""Informer time-series model configuration.

NOTE(review): this file was mangled — every `__init__` parameter was renamed to
the same identifier (a duplicate-argument SyntaxError), the base class was
replaced by the undefined name `a__`, and the class attributes were all assigned
to the single name `_lowercase`.  Parameter names below are restored from the
attribute assignments in the body; the base class is the `PretrainedConfig`
this module imports.
"""
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


UpperCAmelCase : int = logging.get_logger(__name__)

UpperCAmelCase : Union[str, Any] = {
    'huggingface/informer-tourism-monthly': (
        'https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class lowerCamelCase (PretrainedConfig):
    """Configuration class for an Informer model.

    Stores the time-series specific settings (prediction/context lengths,
    feature counts, lags) and the Transformer hyper-parameters.
    """

    # NOTE(review): names restored per the PretrainedConfig contract — confirm.
    model_type = """informer"""
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
        """num_hidden_layers""": """encoder_layers""",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: Optional[List[int]] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ) -> None:
        """Build the config; raises ValueError when `cardinality` or
        `embedding_dimension` disagree with `num_static_categorical_features`."""
        # time series specific configuration
        self.prediction_length = prediction_length
        # falls back to prediction_length when context_length is None (or 0)
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    '''The cardinality should be a list of the same length as `num_static_categorical_features`'''
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    '''The embedding dimension should be a list of the same length as `num_static_categorical_features`'''
                )
            self.embedding_dimension = embedding_dimension
        else:
            # default heuristic: roughly half of each cardinality, capped at 50
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        """Extra per-time-step features appended to the lagged values.

        Renamed from the mangled `UpperCAmelCase_` — the `__init__` body
        explicitly references `self._number_of_features`.
        """
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
47
"""Fast (Rust-backed) tokenizer for GPT-NeoX-20B.

NOTE(review): this file was mangled — all `__init__` parameters shared one name
(a duplicate-argument SyntaxError), the three module constants were all bound
to `UpperCAmelCase` while the class references `VOCAB_FILES_NAMES`,
`PRETRAINED_VOCAB_FILES_MAP` and `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`, and
both methods were named `UpperCAmelCase_` (the second shadowed the first).
Names below are restored from those in-file references and the base-class
contract.
"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

UpperCAmelCase : Optional[int] = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'tokenizer_file': {
        'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'gpt-neox-20b': 2048,
}


class lowerCamelCase (PreTrainedTokenizerFast):
    """GPT-NeoX-20B tokenizer backed by the `tokenizers` library."""

    # NOTE(review): attribute names restored per the PreTrainedTokenizerFast contract.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ) -> None:
        """Initialize the fast tokenizer and sync `add_prefix_space` with the
        serialized pre-tokenizer state."""
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        # Rebuild the backend pre-tokenizer if its serialized add_prefix_space
        # flag disagrees with the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('''add_prefix_space''', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('''type'''))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the tokenizer model files to `save_directory`; returns their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Flatten a Conversation into token ids, one EOS after each turn,
        truncated to the trailing `model_max_length` tokens."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            # NOTE(review): special-token flag restored as False — confirm against callers.
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length:]
        return input_ids
47
1
"""Fast (Rust-backed) tokenizer for GPT-NeoX-20B.

NOTE(review): this file was mangled — all `__init__` parameters shared one name
(a duplicate-argument SyntaxError), the three module constants were all bound
to `UpperCAmelCase` while the class references `VOCAB_FILES_NAMES`,
`PRETRAINED_VOCAB_FILES_MAP` and `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`, and
both methods were named `UpperCAmelCase_` (the second shadowed the first).
Names below are restored from those in-file references and the base-class
contract.
"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

UpperCAmelCase : Optional[int] = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'tokenizer_file': {
        'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'gpt-neox-20b': 2048,
}


class lowerCamelCase (PreTrainedTokenizerFast):
    """GPT-NeoX-20B tokenizer backed by the `tokenizers` library."""

    # NOTE(review): attribute names restored per the PreTrainedTokenizerFast contract.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ) -> None:
        """Initialize the fast tokenizer and sync `add_prefix_space` with the
        serialized pre-tokenizer state."""
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        # Rebuild the backend pre-tokenizer if its serialized add_prefix_space
        # flag disagrees with the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('''add_prefix_space''', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('''type'''))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the tokenizer model files to `save_directory`; returns their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Flatten a Conversation into token ids, one EOS after each turn,
        truncated to the trailing `model_max_length` tokens."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            # NOTE(review): special-token flag restored as False — confirm against callers.
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length:]
        return input_ids
47
'''simple docstring''' import math from numpy import inf from scipy.integrate import quad def _a ( lowerCAmelCase_ ): """simple docstring""" if num <= 0: raise ValueError('''math domain error''' ) return quad(lowerCAmelCase_ , 0 , lowerCAmelCase_ , args=(lowerCAmelCase_) )[0] def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" return math.pow(lowerCAmelCase_ , z - 1 ) * math.exp(-x ) if __name__ == "__main__": from doctest import testmod testmod()
47
1
"""Helper functions for training models with pytorch-quantization.

NOTE(review): this file was mangled — every function was named `_a` (while the
bodies call `set_quantizer_by_name`, `recalibrate_weights`, `fuse_qkv`,
`clip_gelu`, `print_quant_summary`, `set_quantizer` and `set_quantizers`, none
of which were defined), all parameters within each function shared one name
(a duplicate-argument SyntaxError), and the module constants collided while
the f-strings reference `name_width`/`qname_width`.  Names are restored from
those in-file references.  Additional fixes: `mod.weight_quantizer` typo
(the attribute tested just above is `_weight_quantizer`) and a missing `f`
prefix on the RECALIB warning string.
"""
import logging
import re

import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor


logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names


def add_arguments(parser):
    """Register the quant_trainer command-line options on `parser`."""
    group = parser.add_argument_group('''quant_trainer arguments''')
    group.add_argument('''--wprec''', type=int, default=8, help='''weight precision''')
    group.add_argument('''--aprec''', type=int, default=8, help='''activation precision''')
    group.add_argument('''--quant-per-tensor''', action='''store_true''', help='''per tensor weight scaling''')
    group.add_argument('''--quant-disable''', action='''store_true''', help='''disable all quantizers''')
    group.add_argument('''--quant-disable-embeddings''', action='''store_true''', help='''disable all embeddings quantizers''')
    group.add_argument('''--quant-disable-keyword''', type=str, nargs='''+''', help='''disable quantizers by keyword''')
    group.add_argument('''--quant-disable-layer-module''', type=str, help='''disable quantizers by keyword under layer.''')
    group.add_argument('''--quant-enable-layer-module''', type=str, help='''enable quantizers by keyword under layer''')
    group.add_argument('''--calibrator''', default='''max''', help='''which quantization range calibrator to use''')
    group.add_argument('''--percentile''', default=None, type=float, help='''percentile for PercentileCalibrator''')
    group.add_argument('''--fuse-qkv''', action='''store_true''', help='''use the same scale factor for qkv''')
    group.add_argument('''--clip-gelu''', metavar='''N''', type=float, help='''clip gelu output maximum value to N''')
    group.add_argument(
        '''--recalibrate-weights''',
        action='''store_true''',
        help=(
            '''recalibrate weight amaxes by taking the max of the weights.'''
            ''' amaxes will be computed with the current quantization granularity (axis).'''
        ),
    )


def set_default_quantizers(args):
    """Install default input/weight QuantDescriptors chosen from `args`."""
    if args.calibrator == "max":
        calib_method = '''max'''
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError('''Specify --percentile when using percentile calibrator''')
        calib_method = '''histogram'''
    elif args.calibrator == "mse":
        calib_method = '''histogram'''
    else:
        raise ValueError(f'''Invalid calibrator {args.calibrator}''')

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    # weights are per-channel (axis 0) unless per-tensor scaling was requested
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)


def configure_model(model, args, calib=False, eval=False):
    """Enable/disable/adjust quantizers on `model` per `args`.

    The quantizer toggles are skipped while calibrating (`calib=True`).
    NOTE(review): `eval` is kept for signature compatibility — it shadows the
    builtin and is unused in the visible body.
    """
    logger.info('''Configuring Model for Quantization''')
    logger.info(f'''using quantization package {pytorch_quantization.__file__}''')

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ['''embeddings'''], which='''weight''', _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [''''''], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [R'''layer.\d+.''' + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            # NOTE(review): "_disabled=False" restored from the option's "enable" semantics — confirm.
            set_quantizer_by_name(model, [R'''layer.\d+.''' + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)
        if args.clip_gelu:
            clip_gelu(model, args.clip_gelu)

    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model)


def enable_calibration(model):
    """Put every `*_quantizer` module into calibration mode (quant disabled)."""
    logger.info('''Enabling Calibration''')
    for name, module in model.named_modules():
        if name.endswith('''_quantizer'''):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f'''{name:80}: {module}''')


def finish_calibration(model, args):
    """Load calibrated amaxes and switch quantizers back to quantization mode."""
    logger.info('''Loading calibrated amax''')
    for name, module in model.named_modules():
        if name.endswith('''_quantizer'''):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax('''percentile''', percentile=args.percentile)
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model)


def fuse_qkv(model, args):
    """Share one amax across the q/k/v quantizers of each self-attention block."""

    def fusea(qq, qk, qv):
        # All three quantizers adopt the max of their individual amaxes.
        for mod in [qq, qk, qv]:
            if not hasattr(mod, '''_amax'''):
                print('''          WARNING: NO AMAX BUFFER''')
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()

        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f'''          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''')

    for name, mod in model.named_modules():
        if name.endswith('''.attention.self'''):
            logger.info(f'''FUSE_QKV: {name:{name_width}}''')
            fusea(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fusea(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)


def clip_gelu(model, maxval):
    """Clamp the input-quantizer amax of every non-attention `.output.dense` to `maxval`."""
    for name, mod in model.named_modules():
        if name.endswith('''.output.dense''') and not name.endswith('''attention.output.dense'''):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''')


def expand_amax(model):
    """Expand a scalar per-axis amax into a per-output-channel vector, in place."""
    for name, mod in model.named_modules():
        if hasattr(mod, '''_weight_quantizer''') and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''')


def recalibrate_weights(model):
    """Recompute weight amaxes from the current weights at the current granularity."""
    for name, mod in model.named_modules():
        if hasattr(mod, '''_weight_quantizer'''):
            # BUG FIX: was `mod.weight_quantizer` (nonexistent attribute) and a
            # plain string literal instead of an f-string.
            if not hasattr(mod._weight_quantizer, '''_amax'''):
                print(f'''RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER''')
                continue

            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1, 2, 3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''')
            mod._weight_quantizer._amax = amax


def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Log one line per weighted module: name plus input/weight quantizer reprs."""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]

    # widen the name column to the longest module name
    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, '''weight'''):
            continue
        name_width = max(name_width, len(name))

    for name, mod in model.named_modules():
        input_q = getattr(mod, '''_input_quantizer''', None)
        weight_q = getattr(mod, '''_weight_quantizer''', None)
        if not hasattr(mod, '''weight'''):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f'''Act:{input_q.extra_repr()}'''
        wgt_str = f'''Wgt:{weight_q.extra_repr()}'''
        s = f'''{name:{name_width}} {act_str} {wgt_str}'''
        if len(s) <= line_width:
            logger.info(s)
        else:
            # too wide for one line: split across two
            logger.info(f'''{name:{name_width}} {act_str}''')
            logger.info(f'''{" ":{name_width}} {wgt_str}''')


def print_quant_summary(model):
    """Print every TensorQuantizer in `model` and a final count."""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f'''{name:80} {mod}''')
            count += 1
    print(f'''{count} TensorQuantizers found in model''')


def set_quantizer(name, mod, quantizer, k, v):
    """Set attribute `k` to `v` on `mod`'s quantizer (`_input_quantizer`/`_weight_quantizer`)."""
    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f'''{name} has no {quantizer}''')


def set_quantizers(name, mod, which="both", **kwargs):
    """Apply each keyword setting to the input and/or weight quantizer of `mod`."""
    s = f'''Warning: changing {which} quantizers of {name:{qname_width}}'''
    for k, v in kwargs.items():
        s += f''' {k}={v}'''
        if which in ["input", "both"]:
            set_quantizer(name, mod, '''_input_quantizer''', k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, '''_weight_quantizer''', k, v)
    logger.info(s)


def set_quantizer_by_name(model, names, **kwargs):
    """Apply quantizer settings to every module whose name matches a regex in `names`."""
    for name, mod in model.named_modules():
        if hasattr(mod, '''_input_quantizer''') or hasattr(mod, '''_weight_quantizer'''):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith('''_quantizer'''):
            for n in names:
                if re.search(n, name):
                    s = f'''Warning: changing {name:{name_width}}'''
                    for k, v in kwargs.items():
                        s += f''' {k}={v}'''
                        setattr(mod, k, v)
                    logger.info(s)
47
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available, is_torch_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow if is_tf_available(): from transformers import ( AutoConfig, BertConfig, GPTaConfig, TaConfig, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST if is_torch_available(): from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelWithLMHead, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertModel, GPTaLMHeadModel, RobertaForMaskedLM, TaForConditionalGeneration, ) @is_pt_tf_cross_test class lowerCamelCase (unittest.TestCase ): @slow def UpperCAmelCase_ ( self ) -> List[Any]: """simple docstring""" for model_name in ["bert-base-uncased"]: _snake_case : Union[str, Any] = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Any = TFAutoModel.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : str = AutoModel.from_pretrained(lowercase__ , 
from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> List[str]: """simple docstring""" for model_name in ["bert-base-uncased"]: _snake_case : Optional[Any] = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Dict = TFAutoModelForPreTraining.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Tuple = AutoModelForPreTraining.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> Union[str, Any]: """simple docstring""" for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : Optional[int] = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : List[Any] = TFAutoModelForCausalLM.from_pretrained(lowercase__ , from_pt=lowercase__ ) _snake_case , _snake_case : Tuple = TFAutoModelForCausalLM.from_pretrained( lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained(lowercase__ , from_tf=lowercase__ ) _snake_case , _snake_case : Optional[Any] = AutoModelForCausalLM.from_pretrained( lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : List[Any] = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) 
_snake_case : Tuple = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : int = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> Any: """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : List[str] = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(lowercase__ , from_pt=lowercase__ ) _snake_case , _snake_case : List[str] = TFAutoModelForMaskedLM.from_pretrained( lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : int = AutoModelForMaskedLM.from_pretrained(lowercase__ , from_tf=lowercase__ ) _snake_case , _snake_case : Optional[int] = AutoModelForMaskedLM.from_pretrained( lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> List[str]: """simple docstring""" for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : List[str] = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase__ , from_pt=lowercase__ ) _snake_case , _snake_case : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained( lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : List[str] = 
AutoModelForSeqaSeqLM.from_pretrained(lowercase__ , from_tf=lowercase__ ) _snake_case , _snake_case : Dict = AutoModelForSeqaSeqLM.from_pretrained( lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> Dict: """simple docstring""" for model_name in ["bert-base-uncased"]: _snake_case : Any = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Any = TFAutoModelForSequenceClassification.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Dict = AutoModelForSequenceClassification.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> Optional[int]: """simple docstring""" for model_name in ["bert-base-uncased"]: _snake_case : str = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : str = TFAutoModelForQuestionAnswering.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Union[str, Any] = AutoModelForQuestionAnswering.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) def UpperCAmelCase_ ( self ) -> str: """simple docstring""" _snake_case : Union[str, Any] = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 ) _snake_case : Tuple = 
AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 ) def UpperCAmelCase_ ( self ) -> str: """simple docstring""" _snake_case : List[str] = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 ) _snake_case : int = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
47
1
'''Unit tests for `FeaturesManager.determine_framework` (ONNX export helper).

NOTE(review): this file has been machine-obfuscated in a breaking way:
  * every local is assigned to the throwaway name `_snake_case` while later
    statements still reference the original names (`model_pt`, `model_tf`,
    `framework`, ...), which are therefore undefined at runtime;
  * all test methods share the name `UpperCAmelCase_`, so only the last one
    survives class creation — the earlier "tests" can never run;
  * the base class `a__` is undefined (presumably `unittest.TestCase`).
The original variable names cannot be recovered with confidence, so the code
is documented as-is rather than rewritten.
'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch

from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch


@require_torch
@require_tf
class lowerCamelCase (a__ ):
    def UpperCAmelCase_ ( self ) -> List[Any]:
        """setUp-style fixture: binds the tiny test model id and the framework tags.

        NOTE(review): the three values are bound to locals named `_snake_case`
        and discarded; later methods read `self.test_model`,
        `self.framework_pt`, `self.framework_tf` — never set here.
        """
        _snake_case : Tuple = SMALL_MODEL_IDENTIFIER
        _snake_case : Optional[int] = '''pt'''
        _snake_case : Optional[Any] = '''tf'''

    def UpperCAmelCase_ ( self , lowercase__ ) -> Optional[Any]:
        """Save a PyTorch checkpoint of the test model into the given directory.

        NOTE(review): the model is bound to `_snake_case`, but the next line
        calls `model_pt.save_pretrained(...)` — `model_pt` is undefined.
        """
        _snake_case : List[str] = AutoModel.from_pretrained(self.test_model )
        model_pt.save_pretrained(lowercase__ )

    def UpperCAmelCase_ ( self , lowercase__ ) -> Optional[int]:
        """Save a TensorFlow checkpoint (converted from PyTorch) into the given directory.

        NOTE(review): same breakage — `model_tf` is undefined.
        """
        _snake_case : Dict = TFAutoModel.from_pretrained(self.test_model , from_pt=lowercase__ )
        model_tf.save_pretrained(lowercase__ )

    def UpperCAmelCase_ ( self ) -> List[Any]:
        """When a framework is explicitly provided, determine_framework must echo it back,
        for a hub model id as well as for local PT and TF checkpoints."""
        _snake_case : List[str] = '''mock_framework'''
        # Framework provided - return whatever the user provides
        _snake_case : Tuple = FeaturesManager.determine_framework(self.test_model , lowercase__ )
        self.assertEqual(lowercase__ , lowercase__ )
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(lowercase__ )
            _snake_case : Optional[Any] = FeaturesManager.determine_framework(lowercase__ , lowercase__ )
            self.assertEqual(lowercase__ , lowercase__ )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(lowercase__ )
            _snake_case : Tuple = FeaturesManager.determine_framework(lowercase__ , lowercase__ )
            self.assertEqual(lowercase__ , lowercase__ )

    def UpperCAmelCase_ ( self ) -> Union[str, Any]:
        """Without an explicit framework, determine_framework must infer it from the
        local checkpoint contents (PT vs TF), and raise on an empty directory.

        NOTE(review): `lowercase__` is referenced but is not a parameter of
        this method — undefined name after obfuscation.
        """
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(lowercase__ )
            _snake_case : Dict = FeaturesManager.determine_framework(lowercase__ )
            self.assertEqual(lowercase__ , self.framework_pt )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(lowercase__ )
            _snake_case : Optional[int] = FeaturesManager.determine_framework(lowercase__ )
            self.assertEqual(lowercase__ , self.framework_tf )
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(lowercase__ ):
                _snake_case : Union[str, Any] = FeaturesManager.determine_framework(lowercase__ )

    def UpperCAmelCase_ ( self ) -> List[str]:
        """For a hub model id, framework choice follows environment availability:
        PT-only -> pt, TF-only -> tf, both -> pt (preferred), neither -> raise."""
        _snake_case : Optional[int] = MagicMock(return_value=lowercase__ )
        with patch('''transformers.onnx.features.is_tf_available''' , lowercase__ ):
            _snake_case : Optional[Any] = FeaturesManager.determine_framework(self.test_model )
        self.assertEqual(lowercase__ , self.framework_pt )
        # PyTorch not in environment -> use TensorFlow
        _snake_case : Optional[Any] = MagicMock(return_value=lowercase__ )
        with patch('''transformers.onnx.features.is_torch_available''' , lowercase__ ):
            _snake_case : Optional[Any] = FeaturesManager.determine_framework(self.test_model )
        self.assertEqual(lowercase__ , self.framework_tf )
        # Both in environment -> use PyTorch
        _snake_case : Optional[int] = MagicMock(return_value=lowercase__ )
        _snake_case : str = MagicMock(return_value=lowercase__ )
        with patch('''transformers.onnx.features.is_tf_available''' , lowercase__ ), patch(
            '''transformers.onnx.features.is_torch_available''' , lowercase__ ):
            _snake_case : Tuple = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(lowercase__ , self.framework_pt )
        # Both not in environment -> raise error
        _snake_case : Dict = MagicMock(return_value=lowercase__ )
        _snake_case : Optional[int] = MagicMock(return_value=lowercase__ )
        with patch('''transformers.onnx.features.is_tf_available''' , lowercase__ ), patch(
            '''transformers.onnx.features.is_torch_available''' , lowercase__ ):
            with self.assertRaises(lowercase__ ):
                _snake_case : Optional[Any] = FeaturesManager.determine_framework(self.test_model )
47
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase : Dict = {'configuration_timm_backbone': ['TimmBackboneConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Union[str, Any] = ['TimmBackbone'] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
47
1
'''simple docstring''' print((lambda quine: quine % quine)('print((lambda quine: quine %% quine)(%r))'))
47
'''PyTorch-Lightning glue for fine-tuning HuggingFace transformers
(`lightning_base`-style): a generic LightningModule wrapper, logging/debug
callbacks, and helpers to register CLI args and build/run a `pl.Trainer`.

NOTE(review): this file has been machine-obfuscated in a breaking way:
  * every `def` repeats the parameter name `lowercase__` / `lowerCAmelCase_`
    for all of its parameters — duplicate argument names are a SyntaxError;
  * locals are bound to the throwaway name `_snake_case` while later code
    still references the original names (`config`, `tokenizer`, `model`,
    `optimizer`, `scheduler`, `metrics`, `parser`, `args`, ...), and the
    module constants are all bound to `UpperCAmelCase` while later code reads
    `arg_to_scheduler`, `MODEL_MODES`, `arg_to_scheduler_choices`;
  * the three `pl.Callback` subclasses and the LightningModule all share the
    class name `lowerCamelCase`, so only the last binding of each survives.
The original identifiers cannot be recovered with confidence, so the code is
documented as-is rather than rewritten.
'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict

import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
    AdamW,
    AutoConfig,
    AutoModel,
    AutoModelForPreTraining,
    AutoModelForQuestionAnswering,
    AutoModelForSeqaSeqLM,
    AutoModelForSequenceClassification,
    AutoModelForTokenClassification,
    AutoModelWithLMHead,
    AutoTokenizer,
    PretrainedConfig,
    PreTrainedTokenizer,
)
from transformers.optimization import (
    Adafactor,
    get_cosine_schedule_with_warmup,
    get_cosine_with_hard_restarts_schedule_with_warmup,
    get_linear_schedule_with_warmup,
    get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version

# Module logger (bound to an obfuscated name; later code never reads it).
UpperCAmelCase : Tuple = logging.getLogger(__name__)

require_version('pytorch_lightning>=1.0.4')

# task-mode -> AutoModel class (originally `MODEL_MODES`).
UpperCAmelCase : str = {
    'base': AutoModel,
    'sequence-classification': AutoModelForSequenceClassification,
    'question-answering': AutoModelForQuestionAnswering,
    'pretraining': AutoModelForPreTraining,
    'token-classification': AutoModelForTokenClassification,
    'language-modeling': AutoModelWithLMHead,
    'summarization': AutoModelForSeqaSeqLM,
    'translation': AutoModelForSeqaSeqLM,
}

# update this and the import above to support new schedulers from transformers.optimization
# scheduler name -> factory (originally `arg_to_scheduler`).
UpperCAmelCase : Optional[Any] = {
    'linear': get_linear_schedule_with_warmup,
    'cosine': get_cosine_schedule_with_warmup,
    'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
    'polynomial': get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule, # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
# NOTE(review): `arg_to_scheduler` / `arg_to_scheduler_choices` are undefined —
# the dict above was rebound to `UpperCAmelCase` by the obfuscator.
UpperCAmelCase : Tuple = sorted(arg_to_scheduler.keys())
UpperCAmelCase : Optional[Any] = '{' + ', '.join(arg_to_scheduler_choices) + '}'


class lowerCamelCase (pl.LightningModule ):
    # Generic LightningModule wrapping a HF config + tokenizer + model for one
    # task "mode" (a key of the MODEL_MODES dict above).

    def __init__( self , lowercase__ , lowercase__=None , lowercase__="base" , lowercase__=None , lowercase__=None , lowercase__=None , **lowercase__ , ) -> Optional[int]:
        """Build (or adopt) the config, tokenizer and model for the given hparams.

        NOTE(review): duplicate `lowercase__` parameter names (SyntaxError);
        body still references the original parameter names `config`,
        `num_labels`, `tokenizer`, `mode`, `model` — all undefined here.
        """
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(lowercase__ )
        _snake_case : Union[str, Any] = 0
        _snake_case : int = Path(self.hparams.output_dir )
        _snake_case : int = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            _snake_case : Tuple = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path ,
                **({'''num_labels''': num_labels} if num_labels is not None else {}) ,
                cache_dir=lowercase__ ,
                **lowercase__ ,
            )
        else:
            _snake_case : PretrainedConfig = config
        # Mirror selected hparams into the model config when present.
        _snake_case : Optional[Any] = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
        for p in extra_model_params:
            if getattr(self.hparams , lowercase__ , lowercase__ ):
                assert hasattr(self.config , lowercase__ ), F'''model config doesn\'t have a `{p}` attribute'''
                setattr(self.config , lowercase__ , getattr(self.hparams , lowercase__ ) )
        if tokenizer is None:
            _snake_case : Optional[int] = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path ,
                cache_dir=lowercase__ ,
            )
        else:
            _snake_case : PreTrainedTokenizer = tokenizer
        _snake_case : Any = MODEL_MODES[mode]
        if model is None:
            _snake_case : List[Any] = self.model_type.from_pretrained(
                self.hparams.model_name_or_path ,
                from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) ,
                config=self.config ,
                cache_dir=lowercase__ ,
            )
        else:
            _snake_case : Optional[Any] = model

    def UpperCAmelCase_ ( self , *lowercase__ , **lowercase__ ) -> List[str]:
        """Reload the underlying HF model from a checkpoint.

        NOTE(review): the loaded model is bound to a discarded local —
        presumably the original assigned `self.model`; verify upstream.
        """
        _snake_case : Dict = self.model_type.from_pretrained(*lowercase__ , **lowercase__ )

    def UpperCAmelCase_ ( self ) -> List[Any]:
        """Build the LR scheduler dict for the configured scheduler type.

        NOTE(review): `get_schedule_func` / `scheduler` are undefined — the
        bindings above them were renamed to `_snake_case`.
        """
        _snake_case : Optional[int] = arg_to_scheduler[self.hparams.lr_scheduler]
        _snake_case : Optional[int] = get_schedule_func(
            self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
        _snake_case : str = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1}
        return scheduler

    def UpperCAmelCase_ ( self ) -> int:
        """configure_optimizers: weight-decay grouping + AdamW/Adafactor + scheduler."""
        _snake_case : Any = self.model
        _snake_case : List[Any] = ['''bias''', '''LayerNorm.weight''']
        _snake_case : List[str] = [
            {
                '''params''': [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
                ],  # check this named paramters
                '''weight_decay''': self.hparams.weight_decay,
            },
            {
                '''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
                '''weight_decay''': 0.0,
            },
        ]
        if self.hparams.adafactor:
            # Adafactor with fixed LR (scale_parameter/relative_step flags lost to obfuscation)
            _snake_case : Any = Adafactor(
                lowercase__ , lr=self.hparams.learning_rate , scale_parameter=lowercase__ , relative_step=lowercase__ )
        else:
            _snake_case : List[str] = AdamW(
                lowercase__ , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
        _snake_case : List[str] = optimizer
        _snake_case : Any = self.get_lr_scheduler()
        return [optimizer], [scheduler]

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Any:
        """test_step: delegate to validation_step."""
        return self.validation_step(lowercase__ , lowercase__ )

    def UpperCAmelCase_ ( self , lowercase__ ) -> Tuple:
        """test_epoch_end: delegate to validation_end."""
        return self.validation_end(lowercase__ )

    def UpperCAmelCase_ ( self ) -> int:
        """Total optimizer steps = dataset_size / effective batch size * epochs."""
        _snake_case : Any = max(1 , self.hparams.gpus )  # TODO: consider num_tpu_cores
        _snake_case : Optional[int] = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def UpperCAmelCase_ ( self , lowercase__ ) -> Any:
        """setup hook: record dataset_size for the given stage ('test' vs train)."""
        if stage == "test":
            _snake_case : Any = len(self.test_dataloader().dataset )
        else:
            _snake_case : Dict = self.get_dataloader('''train''' , self.hparams.train_batch_size , shuffle=lowercase__ )
            _snake_case : Optional[int] = len(self.train_dataloader().dataset )

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ = False ) -> str:
        """Abstract dataloader factory — subclasses must implement per task."""
        raise NotImplementedError('''You must implement this for your task''' )

    def UpperCAmelCase_ ( self ) -> Optional[int]:
        """train_dataloader: return the cached training loader."""
        return self.train_loader

    def UpperCAmelCase_ ( self ) -> Dict:
        """val_dataloader."""
        return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=lowercase__ )

    def UpperCAmelCase_ ( self ) -> Optional[Any]:
        """test_dataloader."""
        return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=lowercase__ )

    def UpperCAmelCase_ ( self , lowercase__ ) -> Optional[int]:
        """Path of the cached-features file for a split: cached_{mode}_{model}_{max_len}."""
        return os.path.join(
            self.hparams.data_dir ,
            '''cached_{}_{}_{}'''.format(
                lowercase__ ,
                list(filter(lowercase__ , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() ,
                str(self.hparams.max_seq_length ) ,
            ) ,
        )

    @pl.utilities.rank_zero_only
    def UpperCAmelCase_ ( self , lowercase__ ) -> None:
        """on_save_checkpoint: also dump HF-format model+tokenizer to output_dir/best_tfmr."""
        _snake_case : Dict = self.output_dir.joinpath('''best_tfmr''' )
        _snake_case : Tuple = self.step_count
        self.model.save_pretrained(lowercase__ )
        self.tokenizer.save_pretrained(lowercase__ )

    @staticmethod
    def UpperCAmelCase_ ( lowercase__ , lowercase__ ) -> Tuple:
        """add_model_specific_args: register model/optimizer CLI flags on `parser`.

        NOTE(review): duplicate `lowercase__` parameter names (SyntaxError);
        `parser` is referenced but undefined under the obfuscated names.
        """
        parser.add_argument(
            '''--model_name_or_path''' , default=lowercase__ , type=lowercase__ , required=lowercase__ ,
            help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
        parser.add_argument(
            '''--config_name''' , default='''''' , type=lowercase__ , help='''Pretrained config name or path if not the same as model_name''' )
        parser.add_argument(
            '''--tokenizer_name''' , default=lowercase__ , type=lowercase__ ,
            help='''Pretrained tokenizer name or path if not the same as model_name''' , )
        parser.add_argument(
            '''--cache_dir''' , default=str(Path(lowercase__ ).parent / '''test_run''' / '''cache''' ) , type=lowercase__ ,
            help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , )
        parser.add_argument(
            '''--encoder_layerdrop''' , type=lowercase__ ,
            help='''Encoder layer dropout probability (Optional). Goes into model.config''' , )
        parser.add_argument(
            '''--decoder_layerdrop''' , type=lowercase__ ,
            help='''Decoder layer dropout probability (Optional). Goes into model.config''' , )
        parser.add_argument(
            '''--dropout''' , type=lowercase__ ,
            help='''Dropout probability (Optional). Goes into model.config''' , )
        parser.add_argument(
            '''--attention_dropout''' , type=lowercase__ ,
            help='''Attention dropout probability (Optional). Goes into model.config''' , )
        parser.add_argument('''--learning_rate''' , default=5E-5 , type=lowercase__ , help='''The initial learning rate for Adam.''' )
        parser.add_argument(
            '''--lr_scheduler''' , default='''linear''' , choices=lowercase__ , metavar=lowercase__ , type=lowercase__ ,
            help='''Learning rate scheduler''' , )
        parser.add_argument('''--weight_decay''' , default=0.0 , type=lowercase__ , help='''Weight decay if we apply some.''' )
        parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=lowercase__ , help='''Epsilon for Adam optimizer.''' )
        parser.add_argument('''--warmup_steps''' , default=0 , type=lowercase__ , help='''Linear warmup over warmup_steps.''' )
        parser.add_argument('''--num_workers''' , default=4 , type=lowercase__ , help='''kwarg passed to DataLoader''' )
        parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=lowercase__ )
        parser.add_argument('''--train_batch_size''' , default=32 , type=lowercase__ )
        parser.add_argument('''--eval_batch_size''' , default=32 , type=lowercase__ )
        parser.add_argument('''--adafactor''' , action='''store_true''' )


class lowerCamelCase (pl.Callback ):
    # Originally `InitCallback`: initializes the RAG retriever on the master worker.
    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> str:
        """NOTE(review): `trainer` / `pl_module` are referenced but the
        parameters are both named `lowercase__` (SyntaxError + undefined names)."""
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class lowerCamelCase (pl.Callback ):
    # Originally a gradient-check callback: prints parameters with no gradient.
    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> List[str]:
        """Print the name of every RAG parameter whose grad is None (debug aid)."""
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(lowercase__ )


class lowerCamelCase (pl.Callback ):
    # Originally `LoggingCallback`: logs LRs per batch and metrics at val/test end.
    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Any:
        """Log the per-group learning rates of the first scheduler."""
        _snake_case : Any = trainer.lr_schedulers[0]['''scheduler''']
        _snake_case : Optional[int] = {F'''lr_group_{i}''': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
        pl_module.logger.log_metrics(lowercase__ )

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> List[str]:
        """Print validation metrics (rank zero only)."""
        rank_zero_info('''***** Validation results *****''' )
        _snake_case : Dict = trainer.callback_metrics
        # Log results
        for key in sorted(lowercase__ ):
            if key not in ["log", "progress_bar"]:
                rank_zero_info('''{} = {}\n'''.format(lowercase__ , str(metrics[key] ) ) )

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Dict:
        """Print test metrics and also write them to output_dir/test_results.txt."""
        rank_zero_info('''***** Test results *****''' )
        _snake_case : Dict = trainer.callback_metrics
        # Log and save results to file
        _snake_case : str = os.path.join(pl_module.hparams.output_dir , '''test_results.txt''' )
        with open(lowercase__ , '''w''' ) as writer:
            for key in sorted(lowercase__ ):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info('''{} = {}\n'''.format(lowercase__ , str(metrics[key] ) ) )
                    writer.write('''{} = {}\n'''.format(lowercase__ , str(metrics[key] ) ) )


def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
    """add_generic_args: register trainer-level CLI flags on `parser`.

    NOTE(review): duplicate `lowerCAmelCase_` parameter names (SyntaxError);
    `parser` is referenced but undefined under the obfuscated names.
    """
    parser.add_argument(
        '''--output_dir''' ,
        default=str(Path(lowerCAmelCase_ ).parent / '''test_run''' / '''model_checkpoints''' ) ,
        type=lowerCAmelCase_ ,
        help='''The output directory where the model predictions and checkpoints will be written.''' , )
    parser.add_argument(
        '''--fp16''' , action='''store_true''' ,
        help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
    parser.add_argument(
        '''--fp16_opt_level''' , type=lowerCAmelCase_ , default='''O2''' ,
        help=(
            '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
            '''See details at https://nvidia.github.io/apex/amp.html'''
        ) , )
    parser.add_argument('''--n_tpu_cores''' , dest='''tpu_cores''' , type=lowerCAmelCase_ )
    parser.add_argument('''--max_grad_norm''' , dest='''gradient_clip_val''' , default=1.0 , type=lowerCAmelCase_ , help='''Max gradient norm''' )
    parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
    parser.add_argument('''--do_predict''' , action='''store_true''' , help='''Whether to run predictions on the test set.''' )
    parser.add_argument(
        '''--gradient_accumulation_steps''' , dest='''accumulate_grad_batches''' , type=lowerCAmelCase_ , default=1 ,
        help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
    parser.add_argument('''--seed''' , type=lowerCAmelCase_ , default=42 , help='''random seed for initialization''' )
    parser.add_argument(
        '''--data_dir''' ,
        default=str(Path(lowerCAmelCase_ ).parent / '''test_run''' / '''dummy-train-data''' ) ,
        type=lowerCAmelCase_ ,
        help='''The input data dir. Should contain the training files for the CoNLL-2003 NER task.''' , )


def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=True , lowerCAmelCase_=[] , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ , ):
    """generic_train: build a pl.Trainer from args and optionally fit the model.

    NOTE(review): duplicate parameter names (SyntaxError); a mutable default
    `[]` is used for the extra-callbacks parameter; the body references the
    original names `args`, `model`, `odir`, `checkpoint_callback`,
    `early_stopping_callback`, `logging_callback`, `extra_callbacks`,
    `trainer` — all undefined after obfuscation. `args.fpaa` also looks like a
    mangled `args.fp16`.
    """
    pl.seed_everything(args.seed )
    # init model
    _snake_case : Union[str, Any] = Path(model.hparams.output_dir )
    odir.mkdir(exist_ok=lowerCAmelCase_ )
    # add custom checkpoints
    if checkpoint_callback is None:
        _snake_case : Any = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir , prefix='''checkpoint''' , monitor='''val_loss''' , mode='''min''' , save_top_k=1 )
    if early_stopping_callback:
        extra_callbacks.append(lowerCAmelCase_ )
    if logging_callback is None:
        _snake_case : str = LoggingCallback()
    _snake_case : Tuple = {}
    if args.fpaa:
        _snake_case : Union[str, Any] = 16
    if args.gpus > 1:
        _snake_case : Optional[Any] = '''auto'''
        _snake_case : Tuple = '''ddp'''
    _snake_case : Optional[Any] = args.accumulate_grad_batches
    _snake_case : Tuple = None
    _snake_case : str = '''auto'''
    _snake_case : int = pl.Trainer.from_argparse_args(
        lowerCAmelCase_ ,
        weights_summary=lowerCAmelCase_ ,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] ,
        logger=lowerCAmelCase_ ,
        val_check_interval=1 ,
        num_sanity_val_steps=2 ,
        **lowerCAmelCase_ ,
    )
    if args.do_train:
        trainer.fit(lowerCAmelCase_ )
    else:
        print('''RAG modeling tests with new set functions successfuly executed!''' )
    return trainer
47
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) UpperCAmelCase : Tuple = { 'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig'] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Union[str, Any] = ['VisionEncoderDecoderModel'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : List[Any] = ['TFVisionEncoderDecoderModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Union[str, Any] = ['FlaxVisionEncoderDecoderModel'] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
47
'''Configuration class for the SEW-D speech model (`model_type = "sew-d"`).

NOTE(review): machine-obfuscated in a breaking way:
  * every `__init__` parameter is named `lowercase__` — duplicate argument
    names are a SyntaxError, and the original parameter names are lost;
  * all config values are assigned to the throwaway local `_snake_case`
    instead of `self.<attr>`, yet the validation block and the property read
    `self.conv_dim` / `self.conv_stride` / `self.conv_kernel` /
    `self.num_feat_extract_layers` — never set;
  * the base class `a__` is undefined (presumably `PretrainedConfig`).
Documented as-is; restoring real names would be guesswork.
'''
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging

UpperCAmelCase : List[str] = logging.get_logger(__name__)

# pretrained-config archive map (bound to an obfuscated name, never read below)
UpperCAmelCase : Dict = {
    'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}


class lowerCamelCase (a__ ):
    # model_type identifier used by the Auto* machinery
    _lowercase : List[str] = """sew-d"""

    def __init__( self , lowercase__=32 , lowercase__=768 , lowercase__=12 , lowercase__=12 , lowercase__=3_072 , lowercase__=2 , lowercase__=512 , lowercase__=256 , lowercase__=True , lowercase__=True , lowercase__=("p2c", "c2p") , lowercase__="layer_norm" , lowercase__="gelu_python" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.0 , lowercase__=0.1 , lowercase__=0.02 , lowercase__=1E-7 , lowercase__=1E-5 , lowercase__="group" , lowercase__="gelu" , lowercase__=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowercase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowercase__=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowercase__=False , lowercase__=128 , lowercase__=16 , lowercase__=True , lowercase__=0.05 , lowercase__=10 , lowercase__=2 , lowercase__=0.0 , lowercase__=10 , lowercase__=0 , lowercase__="mean" , lowercase__=False , lowercase__=False , lowercase__=256 , lowercase__=0 , lowercase__=1 , lowercase__=2 , **lowercase__ , ) -> Dict:
        """Initialize the SEW-D configuration.

        The assignment order below suggests the defaults map to (in order):
        vocab_size=32, hidden_size=768, num_hidden_layers=12,
        num_attention_heads=12, intermediate_size=3072, squeeze_factor=2,
        max_position_embeddings=512, position_buckets=256, ... — TODO confirm
        against the upstream `SEWDConfig`.
        """
        super().__init__(**lowercase__ , pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ )
        # NOTE(review): every value below is bound to a discarded local; the
        # original code assigned `self.<name> = <name>`.
        _snake_case : List[str] = hidden_size
        _snake_case : Optional[Any] = feat_extract_norm
        _snake_case : Tuple = feat_extract_activation
        _snake_case : Tuple = list(lowercase__ )
        _snake_case : Any = list(lowercase__ )
        _snake_case : Any = list(lowercase__ )
        _snake_case : Any = conv_bias
        _snake_case : List[Any] = num_conv_pos_embeddings
        _snake_case : Any = num_conv_pos_embedding_groups
        _snake_case : Union[str, Any] = len(self.conv_dim )
        _snake_case : Optional[Any] = num_hidden_layers
        _snake_case : Optional[int] = intermediate_size
        _snake_case : Any = squeeze_factor
        _snake_case : Optional[Any] = max_position_embeddings
        _snake_case : Tuple = position_buckets
        _snake_case : Tuple = share_att_key
        _snake_case : Any = relative_attention
        _snake_case : Optional[int] = norm_rel_ebd
        _snake_case : Optional[Any] = list(lowercase__ )
        _snake_case : List[Any] = hidden_act
        _snake_case : List[Any] = num_attention_heads
        _snake_case : Dict = hidden_dropout
        _snake_case : Tuple = attention_dropout
        _snake_case : Union[str, Any] = activation_dropout
        _snake_case : List[Any] = feat_proj_dropout
        _snake_case : Optional[int] = final_dropout
        _snake_case : Optional[Any] = layer_norm_eps
        _snake_case : Dict = feature_layer_norm_eps
        _snake_case : List[Any] = initializer_range
        _snake_case : Dict = vocab_size
        # Sanity check: the three conv specs must describe the same number of layers.
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect.'''
                '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
                F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
                F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.'''
            )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        _snake_case : Union[str, Any] = apply_spec_augment
        _snake_case : Any = mask_time_prob
        _snake_case : List[str] = mask_time_length
        _snake_case : Dict = mask_time_min_masks
        _snake_case : Union[str, Any] = mask_feature_prob
        _snake_case : Tuple = mask_feature_length
        _snake_case : Union[str, Any] = mask_feature_min_masks
        # ctc loss
        _snake_case : Optional[Any] = ctc_loss_reduction
        _snake_case : Optional[Any] = ctc_zero_infinity
        # sequence classification
        _snake_case : List[Any] = use_weighted_layer_sum
        _snake_case : Any = classifier_proj_size

    @property
    def UpperCAmelCase_ ( self ) -> Any:
        """Overall downsampling factor of the conv feature extractor
        (product of all conv strides)."""
        return functools.reduce(operator.mul , self.conv_stride , 1 )
47
1
'''simple docstring''' import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotConfig, is_flax_available from transformers.testing_utils import jax_device, require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html UpperCAmelCase : Optional[Any] = 'platform' import jax import jax.numpy as jnp from transformers import BlenderbotTokenizer from transformers.models.blenderbot.modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, shift_tokens_right, ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , ): """simple docstring""" if attention_mask is None: _snake_case : Optional[Any] = np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: _snake_case : Union[str, Any] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: _snake_case : Dict = np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _snake_case : List[Any] = np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _snake_case : Any = np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class lowerCamelCase : def __init__( self , lowercase__ , lowercase__=13 , lowercase__=7 , lowercase__=True , 
lowercase__=False , lowercase__=99 , lowercase__=16 , lowercase__=2 , lowercase__=4 , lowercase__=4 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=32 , lowercase__=2 , lowercase__=1 , lowercase__=0 , lowercase__=0.02 , ) -> Union[str, Any]: """simple docstring""" _snake_case : Tuple = parent _snake_case : int = batch_size _snake_case : List[str] = seq_length _snake_case : List[str] = is_training _snake_case : Any = use_labels _snake_case : Optional[Any] = vocab_size _snake_case : Tuple = hidden_size _snake_case : Optional[int] = num_hidden_layers _snake_case : int = num_attention_heads _snake_case : Any = intermediate_size _snake_case : Any = hidden_act _snake_case : Union[str, Any] = hidden_dropout_prob _snake_case : str = attention_probs_dropout_prob _snake_case : int = max_position_embeddings _snake_case : Optional[int] = eos_token_id _snake_case : Optional[Any] = pad_token_id _snake_case : List[Any] = bos_token_id _snake_case : Any = initializer_range def UpperCAmelCase_ ( self ) -> List[Any]: """simple docstring""" _snake_case : str = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size ) _snake_case : Optional[int] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 ) _snake_case : Any = shift_tokens_right(lowercase__ , 1 , 2 ) _snake_case : Any = BlenderbotConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , 
initializer_range=self.initializer_range , use_cache=lowercase__ , ) _snake_case : List[Any] = prepare_blenderbot_inputs_dict(lowercase__ , lowercase__ , lowercase__ ) return config, inputs_dict def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" _snake_case , _snake_case : Any = self.prepare_config_and_inputs() return config, inputs_dict def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ ) -> Optional[Any]: """simple docstring""" _snake_case : Union[str, Any] = 20 _snake_case : int = model_class_name(lowercase__ ) _snake_case : Union[str, Any] = model.encode(inputs_dict['''input_ids'''] ) _snake_case , _snake_case : Optional[Any] = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) _snake_case : int = model.init_cache(decoder_input_ids.shape[0] , lowercase__ , lowercase__ ) _snake_case : Dict = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' ) _snake_case : Optional[int] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _snake_case : List[Any] = model.decode( decoder_input_ids[:, :-1] , lowercase__ , decoder_attention_mask=lowercase__ , past_key_values=lowercase__ , decoder_position_ids=lowercase__ , ) _snake_case : Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) _snake_case : Optional[int] = model.decode( decoder_input_ids[:, -1:] , lowercase__ , decoder_attention_mask=lowercase__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowercase__ , ) _snake_case : Optional[Any] = model.decode(lowercase__ , lowercase__ ) _snake_case : Optional[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ ) -> Tuple: """simple docstring""" 
_snake_case : Dict = 20 _snake_case : Any = model_class_name(lowercase__ ) _snake_case : List[Any] = model.encode(inputs_dict['''input_ids'''] ) _snake_case , _snake_case : List[str] = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) _snake_case : Tuple = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) _snake_case : Tuple = model.init_cache(decoder_input_ids.shape[0] , lowercase__ , lowercase__ ) _snake_case : Optional[int] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _snake_case : List[str] = model.decode( decoder_input_ids[:, :-1] , lowercase__ , decoder_attention_mask=lowercase__ , past_key_values=lowercase__ , decoder_position_ids=lowercase__ , ) _snake_case : Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) _snake_case : List[str] = model.decode( decoder_input_ids[:, -1:] , lowercase__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowercase__ , decoder_position_ids=lowercase__ , ) _snake_case : Optional[Any] = model.decode(lowercase__ , lowercase__ , decoder_attention_mask=lowercase__ ) _snake_case : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' ) @require_flax class lowerCamelCase (unittest.TestCase ): _lowercase : Optional[Any] = 99 def UpperCAmelCase_ ( self ) -> List[Any]: """simple docstring""" _snake_case : Dict = np.array( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 
5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ] , dtype=np.intaa , ) _snake_case : Union[str, Any] = input_ids.shape[0] _snake_case : Union[str, Any] = BlenderbotConfig( vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" _snake_case , _snake_case , _snake_case : Tuple = self._get_config_and_data() _snake_case : Dict = FlaxBlenderbotForConditionalGeneration(lowercase__ ) _snake_case : List[Any] = lm_model(input_ids=lowercase__ ) _snake_case : Optional[Any] = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs['''logits'''].shape , lowercase__ ) def UpperCAmelCase_ ( self ) -> int: """simple docstring""" _snake_case : Any = BlenderbotConfig( vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , ) _snake_case : Optional[Any] = FlaxBlenderbotForConditionalGeneration(lowercase__ ) _snake_case : Dict = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa ) _snake_case : Union[str, Any] = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa ) _snake_case : List[str] = lm_model(input_ids=lowercase__ , decoder_input_ids=lowercase__ ) _snake_case : Any = (*summary.shape, config.vocab_size) self.assertEqual(outputs['''logits'''].shape , lowercase__ ) def UpperCAmelCase_ ( self ) -> Any: """simple docstring""" _snake_case : Any = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa ) _snake_case : Optional[int] = shift_tokens_right(lowercase__ , 1 , 2 ) _snake_case : Optional[int] = 
np.equal(lowercase__ , 1 ).astype(np.floataa ).sum() _snake_case : Dict = np.equal(lowercase__ , 1 ).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape ) self.assertEqual(lowercase__ , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class lowerCamelCase (a__ , unittest.TestCase , a__ ): _lowercase : Optional[Any] = True _lowercase : List[str] = ( ( FlaxBlenderbotModel, FlaxBlenderbotForConditionalGeneration, ) if is_flax_available() else () ) _lowercase : Tuple = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else () def UpperCAmelCase_ ( self ) -> Tuple: """simple docstring""" _snake_case : Any = FlaxBlenderbotModelTester(self ) def UpperCAmelCase_ ( self ) -> List[str]: """simple docstring""" _snake_case , _snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(lowercase__ , lowercase__ , lowercase__ ) def UpperCAmelCase_ ( self ) -> Optional[int]: """simple docstring""" _snake_case , _snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(lowercase__ , lowercase__ , lowercase__ ) def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" _snake_case , _snake_case : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _snake_case : Dict = self._prepare_for_class(lowercase__ , lowercase__ ) _snake_case : List[str] = model_class(lowercase__ ) @jax.jit def encode_jitted(lowercase__ , lowercase__=None , **lowercase__ ): return model.encode(input_ids=lowercase__ , attention_mask=lowercase__ ) with self.subTest('''JIT Enabled''' ): _snake_case : Tuple = encode_jitted(**lowercase__ ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): 
_snake_case : Tuple = encode_jitted(**lowercase__ ).to_tuple() self.assertEqual(len(lowercase__ ) , len(lowercase__ ) ) for jitted_output, output in zip(lowercase__ , lowercase__ ): self.assertEqual(jitted_output.shape , output.shape ) def UpperCAmelCase_ ( self ) -> List[Any]: """simple docstring""" _snake_case , _snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _snake_case : Optional[int] = model_class(lowercase__ ) _snake_case : Tuple = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] ) _snake_case : List[Any] = { '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''], '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''], '''encoder_outputs''': encoder_outputs, } @jax.jit def decode_jitted(lowercase__ , lowercase__ , lowercase__ ): return model.decode( decoder_input_ids=lowercase__ , decoder_attention_mask=lowercase__ , encoder_outputs=lowercase__ , ) with self.subTest('''JIT Enabled''' ): _snake_case : List[str] = decode_jitted(**lowercase__ ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): _snake_case : Any = decode_jitted(**lowercase__ ).to_tuple() self.assertEqual(len(lowercase__ ) , len(lowercase__ ) ) for jitted_output, output in zip(lowercase__ , lowercase__ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def UpperCAmelCase_ ( self ) -> Tuple: """simple docstring""" for model_class_name in self.all_model_classes: _snake_case : Dict = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids _snake_case : List[Any] = np.ones((1, 1) ) * model.config.eos_token_id _snake_case : Union[str, Any] = model(lowercase__ ) self.assertIsNotNone(lowercase__ ) @unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' ) @slow def UpperCAmelCase_ ( self ) -> 
List[str]: """simple docstring""" _snake_case : str = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, '''max_length''': 25} _snake_case : Tuple = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True} _snake_case : Optional[Any] = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=lowercase__ ) _snake_case : int = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' ) _snake_case : Any = ['''Sam'''] _snake_case : List[str] = tokenizer(lowercase__ , return_tensors='''jax''' ) _snake_case : Union[str, Any] = model.generate(**lowercase__ , **lowercase__ ) _snake_case : Tuple = '''Sam is a great name. It means "sun" in Gaelic.''' _snake_case : Dict = tokenizer.batch_decode(lowercase__ , **lowercase__ ) assert generated_txt[0].strip() == tgt_text
47
'''simple docstring''' from random import randint from tempfile import TemporaryFile import numpy as np def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : List[Any] = 0 if start < end: _snake_case : List[Any] = randint(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Any = a[end] _snake_case : List[str] = a[pivot] _snake_case : Optional[int] = temp _snake_case , _snake_case : List[Any] = _in_place_partition(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) count += _in_place_quick_sort(lowerCAmelCase_ , lowerCAmelCase_ , p - 1 ) count += _in_place_quick_sort(lowerCAmelCase_ , p + 1 , lowerCAmelCase_ ) return count def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[Any] = 0 _snake_case : Optional[int] = randint(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Tuple = a[end] _snake_case : Optional[Any] = a[pivot] _snake_case : Union[str, Any] = temp _snake_case : Union[str, Any] = start - 1 for index in range(lowerCAmelCase_ , lowerCAmelCase_ ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value _snake_case : Optional[int] = new_pivot_index + 1 _snake_case : Optional[Any] = a[new_pivot_index] _snake_case : Tuple = a[index] _snake_case : str = temp _snake_case : Any = a[new_pivot_index + 1] _snake_case : str = a[end] _snake_case : Optional[int] = temp return new_pivot_index + 1, count UpperCAmelCase : Dict = TemporaryFile() UpperCAmelCase : Dict = 1_0_0 # 1000 elements are to be sorted UpperCAmelCase, UpperCAmelCase : str = 0, 1 # mean and standard deviation UpperCAmelCase : Optional[Any] = np.random.normal(mu, sigma, p) np.save(outfile, X) print('The array is') print(X) outfile.seek(0) # using the same array UpperCAmelCase : int = np.load(outfile) UpperCAmelCase : Optional[int] = len(M) - 1 UpperCAmelCase : str = _in_place_quick_sort(M, 0, r) print( 'No of Comparisons for 100 elements selected from a standard normal 
distribution' 'is :' ) print(z)
47
1
'''simple docstring''' import argparse import torch from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert from transformers.utils import logging logging.set_verbosity_info() def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : int = BertConfig.from_json_file(lowerCAmelCase_ ) print(f'''Building PyTorch model from configuration: {config}''' ) _snake_case : Optional[Any] = BertForPreTraining(lowerCAmelCase_ ) # Load weights from tf checkpoint load_tf_weights_in_bert(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # Save pytorch-model print(f'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , lowerCAmelCase_ ) if __name__ == "__main__": UpperCAmelCase : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--bert_config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained BERT model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) UpperCAmelCase : List[str] = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
47
'''simple docstring''' from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
47
1
'''simple docstring'''
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class lowerCamelCase(DiffusionPipeline):
    """Unconditional image-generation pipeline for the score-based SDE-VE model.

    Fixes: the base class was the undefined name `a__` (restored to the
    imported `DiffusionPipeline`), and both `__init__` and `__call__`
    declared several parameters all named `lowercase__`, which is a
    SyntaxError in Python. Parameter names are restored to the
    conventional diffusers names.
    """

    # Components registered by __init__ via register_modules.
    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet, scheduler) -> None:
        """Store the denoising U-Net and its SDE-VE scheduler."""
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size=1,
        num_inference_steps=2_000,
        generator=None,
        output_type="pil",
        return_dict=True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """Run the predictor-corrector sampling loop and return images.

        Args:
            batch_size: Number of images to generate.
            num_inference_steps: Number of SDE discretisation steps.
            generator: Optional torch RNG for reproducible noise.
            output_type: "pil" for PIL images, anything else for a numpy array.
            return_dict: If False, return a plain tuple instead of
                ImagePipelineOutput.
        """
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet

        # Start from pure noise scaled to the scheduler's initial sigma.
        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step (Langevin-style refinement before each prediction)
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)
            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        # Use the noise-free mean for the final image, mapped into [0, 1].
        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
47
'''simple docstring''' from argparse import ArgumentParser from .add_new_model import AddNewModelCommand from .add_new_model_like import AddNewModelLikeCommand from .convert import ConvertCommand from .download import DownloadCommand from .env import EnvironmentCommand from .lfs import LfsCommands from .pt_to_tf import PTtoTFCommand from .run import RunCommand from .serving import ServeCommand from .user import UserCommands def _a ( ): """simple docstring""" _snake_case : List[Any] = ArgumentParser('''Transformers CLI tool''' , usage='''transformers-cli <command> [<args>]''' ) _snake_case : List[str] = parser.add_subparsers(help='''transformers-cli command helpers''' ) # Register commands ConvertCommand.register_subcommand(lowerCAmelCase_ ) DownloadCommand.register_subcommand(lowerCAmelCase_ ) EnvironmentCommand.register_subcommand(lowerCAmelCase_ ) RunCommand.register_subcommand(lowerCAmelCase_ ) ServeCommand.register_subcommand(lowerCAmelCase_ ) UserCommands.register_subcommand(lowerCAmelCase_ ) AddNewModelCommand.register_subcommand(lowerCAmelCase_ ) AddNewModelLikeCommand.register_subcommand(lowerCAmelCase_ ) LfsCommands.register_subcommand(lowerCAmelCase_ ) PTtoTFCommand.register_subcommand(lowerCAmelCase_ ) # Let's go _snake_case : str = parser.parse_args() if not hasattr(lowerCAmelCase_ , '''func''' ): parser.print_help() exit(1 ) # Run _snake_case : Union[str, Any] = args.func(lowerCAmelCase_ ) service.run() if __name__ == "__main__": main()
47
1
'''simple docstring''' from __future__ import annotations from sys import maxsize from typing import Generic, TypeVar UpperCAmelCase : Optional[Any] = TypeVar('T') def _a ( lowerCAmelCase_ ): """simple docstring""" return (position - 1) // 2 def _a ( lowerCAmelCase_ ): """simple docstring""" return (2 * position) + 1 def _a ( lowerCAmelCase_ ): """simple docstring""" return (2 * position) + 2 class lowerCamelCase (Generic[T] ): def __init__( self ) -> None: """simple docstring""" _snake_case : list[tuple[T, int]] = [] _snake_case : dict[T, int] = {} _snake_case : int = 0 def __len__( self ) -> int: """simple docstring""" return self.elements def __repr__( self ) -> str: """simple docstring""" return str(self.heap ) def UpperCAmelCase_ ( self ) -> bool: """simple docstring""" return self.elements == 0 def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> None: """simple docstring""" self.heap.append((elem, weight) ) _snake_case : Optional[Any] = self.elements self.elements += 1 self._bubble_up(lowercase__ ) def UpperCAmelCase_ ( self ) -> T: """simple docstring""" if self.elements > 1: self._swap_nodes(0 , self.elements - 1 ) _snake_case , _snake_case : List[str] = self.heap.pop() del self.position_map[elem] self.elements -= 1 if self.elements > 0: _snake_case , _snake_case : Tuple = self.heap[0] self._bubble_down(lowercase__ ) return elem def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> None: """simple docstring""" _snake_case : List[str] = self.position_map[elem] _snake_case : Any = (elem, weight) if position > 0: _snake_case : Optional[int] = get_parent_position(lowercase__ ) _snake_case , _snake_case : Tuple = self.heap[parent_position] if parent_weight > weight: self._bubble_up(lowercase__ ) else: self._bubble_down(lowercase__ ) else: self._bubble_down(lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ ) -> None: """simple docstring""" _snake_case : Tuple = self.position_map[elem] if curr_pos == 0: return None _snake_case : Optional[int] = 
get_parent_position(lowercase__ ) _snake_case , _snake_case : Optional[Any] = self.heap[curr_pos] _snake_case , _snake_case : int = self.heap[parent_position] if parent_weight > weight: self._swap_nodes(lowercase__ , lowercase__ ) return self._bubble_up(lowercase__ ) return None def UpperCAmelCase_ ( self , lowercase__ ) -> None: """simple docstring""" _snake_case : Any = self.position_map[elem] _snake_case , _snake_case : List[Any] = self.heap[curr_pos] _snake_case : Dict = get_child_left_position(lowercase__ ) _snake_case : Optional[int] = get_child_right_position(lowercase__ ) if child_left_position < self.elements and child_right_position < self.elements: _snake_case , _snake_case : Any = self.heap[child_left_position] _snake_case , _snake_case : str = self.heap[child_right_position] if child_right_weight < child_left_weight and child_right_weight < weight: self._swap_nodes(lowercase__ , lowercase__ ) return self._bubble_down(lowercase__ ) if child_left_position < self.elements: _snake_case , _snake_case : List[str] = self.heap[child_left_position] if child_left_weight < weight: self._swap_nodes(lowercase__ , lowercase__ ) return self._bubble_down(lowercase__ ) else: return None if child_right_position < self.elements: _snake_case , _snake_case : Optional[int] = self.heap[child_right_position] if child_right_weight < weight: self._swap_nodes(lowercase__ , lowercase__ ) return self._bubble_down(lowercase__ ) return None def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> None: """simple docstring""" _snake_case : List[Any] = self.heap[nodea_pos][0] _snake_case : str = self.heap[nodea_pos][0] _snake_case , _snake_case : Union[str, Any] = ( self.heap[nodea_pos], self.heap[nodea_pos], ) _snake_case : Dict = nodea_pos _snake_case : Any = nodea_pos class lowerCamelCase (Generic[T] ): def __init__( self ) -> None: """simple docstring""" _snake_case : dict[T, dict[T, int]] = {} _snake_case : int = 0 def __repr__( self ) -> str: """simple docstring""" return 
str(self.connections ) def __len__( self ) -> int: """simple docstring""" return self.nodes def UpperCAmelCase_ ( self , lowercase__ ) -> None: """simple docstring""" if node not in self.connections: _snake_case : Optional[int] = {} self.nodes += 1 def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ ) -> None: """simple docstring""" self.add_node(lowercase__ ) self.add_node(lowercase__ ) _snake_case : Union[str, Any] = weight _snake_case : Any = weight def _a ( lowerCAmelCase_ , ): """simple docstring""" _snake_case : dict[T, int] = {node: maxsize for node in graph.connections} _snake_case : dict[T, T | None] = {node: None for node in graph.connections} _snake_case : MinPriorityQueue[T] = MinPriorityQueue() for node, weight in dist.items(): priority_queue.push(lowerCAmelCase_ , lowerCAmelCase_ ) if priority_queue.is_empty(): return dist, parent # initialization _snake_case : str = priority_queue.extract_min() _snake_case : Optional[int] = 0 for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: _snake_case : Dict = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(lowerCAmelCase_ , dist[neighbour] ) _snake_case : int = node # running prim's algorithm while not priority_queue.is_empty(): _snake_case : Any = priority_queue.extract_min() for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: _snake_case : int = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(lowerCAmelCase_ , dist[neighbour] ) _snake_case : List[str] = node return dist, parent
47
'''simple docstring''' from collections.abc import Generator def _a ( ): """simple docstring""" _snake_case , _snake_case : Union[str, Any] = 0, 1 while True: _snake_case , _snake_case : List[str] = b, a + b yield b def _a ( lowerCAmelCase_ = 1_000 ): """simple docstring""" _snake_case : List[str] = 1 _snake_case : Dict = fibonacci_generator() while len(str(next(lowerCAmelCase_ ) ) ) < n: answer += 1 return answer + 1 if __name__ == "__main__": print(solution(int(str(input()).strip())))
47
1
'''simple docstring''' import argparse import gc import json import os import shutil import warnings import torch from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer try: from transformers import LlamaTokenizerFast except ImportError as e: warnings.warn(e) warnings.warn( 'The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion' ) UpperCAmelCase : Any = None UpperCAmelCase : List[str] = { '7B': 1_1_0_0_8, '13B': 1_3_8_2_4, '30B': 1_7_9_2_0, '65B': 2_2_0_1_6, '70B': 2_8_6_7_2, } UpperCAmelCase : Tuple = { '7B': 1, '7Bf': 1, '13B': 2, '13Bf': 2, '30B': 4, '65B': 8, '70B': 8, '70Bf': 8, } def _a ( lowerCAmelCase_ , lowerCAmelCase_=1 , lowerCAmelCase_=256 ): """simple docstring""" return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of) def _a ( lowerCAmelCase_ ): """simple docstring""" with open(lowerCAmelCase_ , '''r''' ) as f: return json.load(lowerCAmelCase_ ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" with open(lowerCAmelCase_ , '''w''' ) as f: json.dump(lowerCAmelCase_ , lowerCAmelCase_ ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True ): """simple docstring""" os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ ) _snake_case : Tuple = os.path.join(lowerCAmelCase_ , '''tmp''' ) os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ ) _snake_case : Optional[Any] = read_json(os.path.join(lowerCAmelCase_ , '''params.json''' ) ) _snake_case : Optional[int] = NUM_SHARDS[model_size] _snake_case : Tuple = params['''n_layers'''] _snake_case : str = params['''n_heads'''] _snake_case : Optional[Any] = n_heads // num_shards _snake_case : Any = params['''dim'''] _snake_case : Any = dim // n_heads _snake_case : Union[str, Any] = 10_000.0 _snake_case : int = 1.0 / (base ** (torch.arange(0 , lowerCAmelCase_ , 2 ).float() / dims_per_head)) if "n_kv_heads" in params: 
_snake_case : str = params['''n_kv_heads'''] # for GQA / MQA _snake_case : Optional[int] = n_heads_per_shard // num_key_value_heads _snake_case : Optional[Any] = dim // num_key_value_heads else: # compatibility with other checkpoints _snake_case : Any = n_heads _snake_case : List[Any] = n_heads_per_shard _snake_case : List[Any] = dim # permute for sliced rotary def permute(lowerCAmelCase_ , lowerCAmelCase_=n_heads , lowerCAmelCase_=dim , lowerCAmelCase_=dim ): return w.view(lowerCAmelCase_ , dima // n_heads // 2 , 2 , lowerCAmelCase_ ).transpose(1 , 2 ).reshape(lowerCAmelCase_ , lowerCAmelCase_ ) print(f'''Fetching all parameters from the checkpoint at {input_base_path}.''' ) # Load weights if model_size == "7B": # Not sharded # (The sharded implementation would also work, but this is simpler.) _snake_case : List[str] = torch.load(os.path.join(lowerCAmelCase_ , '''consolidated.00.pth''' ) , map_location='''cpu''' ) else: # Sharded _snake_case : Any = [ torch.load(os.path.join(lowerCAmelCase_ , f'''consolidated.{i:02d}.pth''' ) , map_location='''cpu''' ) for i in range(lowerCAmelCase_ ) ] _snake_case : Dict = 0 _snake_case : Optional[Any] = {'''weight_map''': {}} for layer_i in range(lowerCAmelCase_ ): _snake_case : int = f'''pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin''' if model_size == "7B": # Unsharded _snake_case : List[str] = { f'''model.layers.{layer_i}.self_attn.q_proj.weight''': permute( loaded[f'''layers.{layer_i}.attention.wq.weight'''] ), f'''model.layers.{layer_i}.self_attn.k_proj.weight''': permute( loaded[f'''layers.{layer_i}.attention.wk.weight'''] ), f'''model.layers.{layer_i}.self_attn.v_proj.weight''': loaded[f'''layers.{layer_i}.attention.wv.weight'''], f'''model.layers.{layer_i}.self_attn.o_proj.weight''': loaded[f'''layers.{layer_i}.attention.wo.weight'''], f'''model.layers.{layer_i}.mlp.gate_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w1.weight'''], f'''model.layers.{layer_i}.mlp.down_proj.weight''': 
loaded[f'''layers.{layer_i}.feed_forward.w2.weight'''], f'''model.layers.{layer_i}.mlp.up_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w3.weight'''], f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[f'''layers.{layer_i}.attention_norm.weight'''], f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[f'''layers.{layer_i}.ffn_norm.weight'''], } else: # Sharded # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned. _snake_case : str = { f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[0][ f'''layers.{layer_i}.attention_norm.weight''' ].clone(), f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[0][ f'''layers.{layer_i}.ffn_norm.weight''' ].clone(), } _snake_case : int = permute( torch.cat( [ loaded[i][f'''layers.{layer_i}.attention.wq.weight'''].view(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) for i in range(lowerCAmelCase_ ) ] , dim=0 , ).reshape(lowerCAmelCase_ , lowerCAmelCase_ ) ) _snake_case : Any = permute( torch.cat( [ loaded[i][f'''layers.{layer_i}.attention.wk.weight'''].view( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) for i in range(lowerCAmelCase_ ) ] , dim=0 , ).reshape(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) _snake_case : str = torch.cat( [ loaded[i][f'''layers.{layer_i}.attention.wv.weight'''].view( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) for i in range(lowerCAmelCase_ ) ] , dim=0 , ).reshape(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Union[str, Any] = torch.cat( [loaded[i][f'''layers.{layer_i}.attention.wo.weight'''] for i in range(lowerCAmelCase_ )] , dim=1 ) _snake_case : Optional[Any] = torch.cat( 
[loaded[i][f'''layers.{layer_i}.feed_forward.w1.weight'''] for i in range(lowerCAmelCase_ )] , dim=0 ) _snake_case : List[str] = torch.cat( [loaded[i][f'''layers.{layer_i}.feed_forward.w2.weight'''] for i in range(lowerCAmelCase_ )] , dim=1 ) _snake_case : int = torch.cat( [loaded[i][f'''layers.{layer_i}.feed_forward.w3.weight'''] for i in range(lowerCAmelCase_ )] , dim=0 ) _snake_case : Dict = inv_freq for k, v in state_dict.items(): _snake_case : List[str] = filename param_count += v.numel() torch.save(lowerCAmelCase_ , os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) ) _snake_case : Tuple = f'''pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin''' if model_size == "7B": # Unsharded _snake_case : Optional[int] = { '''model.embed_tokens.weight''': loaded['''tok_embeddings.weight'''], '''model.norm.weight''': loaded['''norm.weight'''], '''lm_head.weight''': loaded['''output.weight'''], } else: _snake_case : Any = { '''model.norm.weight''': loaded[0]['''norm.weight'''], '''model.embed_tokens.weight''': torch.cat( [loaded[i]['''tok_embeddings.weight'''] for i in range(lowerCAmelCase_ )] , dim=1 ), '''lm_head.weight''': torch.cat([loaded[i]['''output.weight'''] for i in range(lowerCAmelCase_ )] , dim=0 ), } for k, v in state_dict.items(): _snake_case : List[Any] = filename param_count += v.numel() torch.save(lowerCAmelCase_ , os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) ) # Write configs _snake_case : Optional[Any] = {'''total_size''': param_count * 2} write_json(lowerCAmelCase_ , os.path.join(lowerCAmelCase_ , '''pytorch_model.bin.index.json''' ) ) _snake_case : Optional[int] = params['''ffn_dim_multiplier'''] if '''ffn_dim_multiplier''' in params else 1 _snake_case : str = params['''multiple_of'''] if '''multiple_of''' in params else 256 _snake_case : Optional[int] = LlamaConfig( hidden_size=lowerCAmelCase_ , intermediate_size=compute_intermediate_size(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) , num_attention_heads=params['''n_heads'''] , 
num_hidden_layers=params['''n_layers'''] , rms_norm_eps=params['''norm_eps'''] , num_key_value_heads=lowerCAmelCase_ , ) config.save_pretrained(lowerCAmelCase_ ) # Make space so we can load the model properly now. del state_dict del loaded gc.collect() print('''Loading the checkpoint in a Llama model.''' ) _snake_case : Optional[int] = LlamaForCausalLM.from_pretrained(lowerCAmelCase_ , torch_dtype=torch.floataa , low_cpu_mem_usage=lowerCAmelCase_ ) # Avoid saving this as part of the config. del model.config._name_or_path print('''Saving in the Transformers format.''' ) model.save_pretrained(lowerCAmelCase_ , safe_serialization=lowerCAmelCase_ ) shutil.rmtree(lowerCAmelCase_ ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[int] = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast print(f'''Saving a {tokenizer_class.__name__} to {tokenizer_path}.''' ) _snake_case : Optional[int] = tokenizer_class(lowerCAmelCase_ ) tokenizer.save_pretrained(lowerCAmelCase_ ) def _a ( ): """simple docstring""" _snake_case : List[Any] = argparse.ArgumentParser() parser.add_argument( '''--input_dir''' , help='''Location of LLaMA weights, which contains tokenizer.model and model folders''' , ) parser.add_argument( '''--model_size''' , choices=['''7B''', '''7Bf''', '''13B''', '''13Bf''', '''30B''', '''65B''', '''70B''', '''70Bf''', '''tokenizer_only'''] , ) parser.add_argument( '''--output_dir''' , help='''Location to write HF model and tokenizer''' , ) parser.add_argument('''--safe_serialization''' , type=lowerCAmelCase_ , help='''Whether or not to save using `safetensors`.''' ) _snake_case : int = parser.parse_args() if args.model_size != "tokenizer_only": write_model( model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , ) _snake_case : List[str] = os.path.join(args.input_dir , '''tokenizer.model''' ) 
write_tokenizer(args.output_dir , lowerCAmelCase_ ) if __name__ == "__main__": main()
47
'''simple docstring''' import logging import re import pytorch_quantization import pytorch_quantization.nn as quant_nn import torch from pytorch_quantization import calib from pytorch_quantization.tensor_quant import QuantDescriptor UpperCAmelCase : str = logging.getLogger(__name__) UpperCAmelCase : Dict = 5_0 # max width of layer names UpperCAmelCase : Union[str, Any] = 7_0 # max width of quantizer names def _a ( lowerCAmelCase_ ): """simple docstring""" _snake_case : Dict = parser.add_argument_group('''quant_trainer arguments''' ) group.add_argument('''--wprec''' , type=lowerCAmelCase_ , default=8 , help='''weight precision''' ) group.add_argument('''--aprec''' , type=lowerCAmelCase_ , default=8 , help='''activation precision''' ) group.add_argument('''--quant-per-tensor''' , action='''store_true''' , help='''per tensor weight scaling''' ) group.add_argument('''--quant-disable''' , action='''store_true''' , help='''disable all quantizers''' ) group.add_argument('''--quant-disable-embeddings''' , action='''store_true''' , help='''disable all embeddings quantizers''' ) group.add_argument('''--quant-disable-keyword''' , type=lowerCAmelCase_ , nargs='''+''' , help='''disable quantizers by keyword''' ) group.add_argument('''--quant-disable-layer-module''' , type=lowerCAmelCase_ , help='''disable quantizers by keyword under layer.''' ) group.add_argument('''--quant-enable-layer-module''' , type=lowerCAmelCase_ , help='''enable quantizers by keyword under layer''' ) group.add_argument('''--calibrator''' , default='''max''' , help='''which quantization range calibrator to use''' ) group.add_argument('''--percentile''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , help='''percentile for PercentileCalibrator''' ) group.add_argument('''--fuse-qkv''' , action='''store_true''' , help='''use the same scale factor for qkv''' ) group.add_argument('''--clip-gelu''' , metavar='''N''' , type=lowerCAmelCase_ , help='''clip gelu output maximum value to N''' ) group.add_argument( 
'''--recalibrate-weights''' , action='''store_true''' , help=( '''recalibrate weight amaxes by taking the max of the weights.''' ''' amaxes will be computed with the current quantization granularity (axis).''' ) , ) def _a ( lowerCAmelCase_ ): """simple docstring""" if args.calibrator == "max": _snake_case : Optional[int] = '''max''' elif args.calibrator == "percentile": if args.percentile is None: raise ValueError('''Specify --percentile when using percentile calibrator''' ) _snake_case : Tuple = '''histogram''' elif args.calibrator == "mse": _snake_case : int = '''histogram''' else: raise ValueError(f'''Invalid calibrator {args.calibrator}''' ) _snake_case : Tuple = QuantDescriptor(num_bits=args.aprec , calib_method=lowerCAmelCase_ ) _snake_case : str = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) ) quant_nn.QuantLinear.set_default_quant_desc_input(lowerCAmelCase_ ) quant_nn.QuantLinear.set_default_quant_desc_weight(lowerCAmelCase_ ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_=False ): """simple docstring""" logger.info('''Configuring Model for Quantization''' ) logger.info(f'''using quantization package {pytorch_quantization.__file__}''' ) if not calib: if args.quant_disable_embeddings: set_quantizer_by_name(lowerCAmelCase_ , ['''embeddings'''] , which='''weight''' , _disabled=lowerCAmelCase_ ) if args.quant_disable: set_quantizer_by_name(lowerCAmelCase_ , [''''''] , _disabled=lowerCAmelCase_ ) if args.quant_disable_keyword: set_quantizer_by_name(lowerCAmelCase_ , args.quant_disable_keyword , _disabled=lowerCAmelCase_ ) if args.quant_disable_layer_module: set_quantizer_by_name(lowerCAmelCase_ , [R'''layer.\d+.''' + args.quant_disable_layer_module] , _disabled=lowerCAmelCase_ ) if args.quant_enable_layer_module: set_quantizer_by_name(lowerCAmelCase_ , [R'''layer.\d+.''' + args.quant_enable_layer_module] , _disabled=lowerCAmelCase_ ) if args.recalibrate_weights: 
recalibrate_weights(lowerCAmelCase_ ) if args.fuse_qkv: fuse_qkv(lowerCAmelCase_ , lowerCAmelCase_ ) if args.clip_gelu: clip_gelu(lowerCAmelCase_ , args.clip_gelu ) # if args.local_rank in [-1, 0] and not calib: print_quant_summary(lowerCAmelCase_ ) def _a ( lowerCAmelCase_ ): """simple docstring""" logger.info('''Enabling Calibration''' ) for name, module in model.named_modules(): if name.endswith('''_quantizer''' ): if module._calibrator is not None: module.disable_quant() module.enable_calib() else: module.disable() logger.info(f'''{name:80}: {module}''' ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" logger.info('''Loading calibrated amax''' ) for name, module in model.named_modules(): if name.endswith('''_quantizer''' ): if module._calibrator is not None: if isinstance(module._calibrator , calib.MaxCalibrator ): module.load_calib_amax() else: module.load_calib_amax('''percentile''' , percentile=args.percentile ) module.enable_quant() module.disable_calib() else: module.enable() model.cuda() print_quant_summary(lowerCAmelCase_ ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" def fusea(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): for mod in [qq, qk, qv]: if not hasattr(lowerCAmelCase_ , '''_amax''' ): print(''' WARNING: NO AMAX BUFFER''' ) return _snake_case : Tuple = qq._amax.detach().item() _snake_case : Tuple = qk._amax.detach().item() _snake_case : List[Any] = qv._amax.detach().item() _snake_case : List[str] = max(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) qq._amax.fill_(lowerCAmelCase_ ) qk._amax.fill_(lowerCAmelCase_ ) qv._amax.fill_(lowerCAmelCase_ ) logger.info(f''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' ) for name, mod in model.named_modules(): if name.endswith('''.attention.self''' ): logger.info(f'''FUSE_QKV: {name:{name_width}}''' ) fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer ) if args.quant_per_tensor: 
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" for name, mod in model.named_modules(): if name.endswith('''.output.dense''' ) and not name.endswith('''attention.output.dense''' ): _snake_case : List[Any] = mod._input_quantizer._amax.data.detach().item() mod._input_quantizer._amax.data.detach().clamp_(max=lowerCAmelCase_ ) _snake_case : List[str] = mod._input_quantizer._amax.data.detach().item() logger.info(f'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' ) def _a ( lowerCAmelCase_ ): """simple docstring""" for name, mod in model.named_modules(): if hasattr(lowerCAmelCase_ , '''_weight_quantizer''' ) and mod._weight_quantizer.axis is not None: _snake_case : Dict = mod.weight.shape[0] _snake_case : Optional[int] = mod._weight_quantizer._amax.detach() _snake_case : Optional[int] = torch.ones(lowerCAmelCase_ , dtype=amax.dtype , device=amax.device ) * amax print(f'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' ) def _a ( lowerCAmelCase_ ): """simple docstring""" for name, mod in model.named_modules(): if hasattr(lowerCAmelCase_ , '''_weight_quantizer''' ): if not hasattr(mod.weight_quantizer , '''_amax''' ): print('''RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER''' ) continue # determine which axes to reduce across # e.g. 
a 4D tensor quantized per axis 0 should reduce over (1,2,3) _snake_case : int = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis ) _snake_case : Dict = set(range(len(mod.weight.size() ) ) ) - axis_set _snake_case : Optional[int] = pytorch_quantization.utils.reduce_amax(mod.weight , axis=lowerCAmelCase_ , keepdims=lowerCAmelCase_ ).detach() logger.info(f'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' ) _snake_case : Tuple = amax def _a ( lowerCAmelCase_ , lowerCAmelCase_=25 , lowerCAmelCase_=180 , lowerCAmelCase_=None ): """simple docstring""" if ignore is None: _snake_case : Dict = [] elif not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case : Optional[int] = [ignore] _snake_case : str = 0 for name, mod in model.named_modules(): if not hasattr(lowerCAmelCase_ , '''weight''' ): continue _snake_case : Optional[int] = max(lowerCAmelCase_ , len(lowerCAmelCase_ ) ) for name, mod in model.named_modules(): _snake_case : Optional[Any] = getattr(lowerCAmelCase_ , '''_input_quantizer''' , lowerCAmelCase_ ) _snake_case : Tuple = getattr(lowerCAmelCase_ , '''_weight_quantizer''' , lowerCAmelCase_ ) if not hasattr(lowerCAmelCase_ , '''weight''' ): continue if type(lowerCAmelCase_ ) in ignore: continue if [True for s in ignore if type(lowerCAmelCase_ ) is str and s in name]: continue _snake_case : Optional[int] = f'''Act:{input_q.extra_repr()}''' _snake_case : Any = f'''Wgt:{weight_q.extra_repr()}''' _snake_case : Optional[int] = f'''{name:{name_width}} {act_str} {wgt_str}''' if len(lowerCAmelCase_ ) <= line_width: logger.info(lowerCAmelCase_ ) else: logger.info(f'''{name:{name_width}} {act_str}''' ) logger.info(f'''{" ":{name_width}} {wgt_str}''' ) def _a ( lowerCAmelCase_ ): """simple docstring""" _snake_case : str = 0 for name, mod in model.named_modules(): if isinstance(lowerCAmelCase_ , pytorch_quantization.nn.TensorQuantizer ): print(f'''{name:80} {mod}''' ) count += 1 
print(f'''{count} TensorQuantizers found in model''' ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) if quantizer_mod is not None: assert hasattr(lowerCAmelCase_ , lowerCAmelCase_ ) setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) else: logger.warning(f'''{name} has no {quantizer}''' ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="both" , **lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[Any] = f'''Warning: changing {which} quantizers of {name:{qname_width}}''' for k, v in kwargs.items(): s += f''' {k}={v}''' if which in ["input", "both"]: set_quantizer(lowerCAmelCase_ , lowerCAmelCase_ , '''_input_quantizer''' , lowerCAmelCase_ , lowerCAmelCase_ ) if which in ["weight", "both"]: set_quantizer(lowerCAmelCase_ , lowerCAmelCase_ , '''_weight_quantizer''' , lowerCAmelCase_ , lowerCAmelCase_ ) logger.info(lowerCAmelCase_ ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ): """simple docstring""" for name, mod in model.named_modules(): if hasattr(lowerCAmelCase_ , '''_input_quantizer''' ) or hasattr(lowerCAmelCase_ , '''_weight_quantizer''' ): for n in names: if re.search(lowerCAmelCase_ , lowerCAmelCase_ ): set_quantizers(lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ) elif name.endswith('''_quantizer''' ): for n in names: if re.search(lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case : Any = f'''Warning: changing {name:{name_width}}''' for k, v in kwargs.items(): s += f''' {k}={v}''' setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) logger.info(lowerCAmelCase_ )
47
1
'''simple docstring''' import unittest from transformers import is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow if is_flax_available(): import optax from flax.training.common_utils import onehot from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration from transformers.models.ta.modeling_flax_ta import shift_tokens_right @require_torch @require_sentencepiece @require_tokenizers @require_flax class lowerCamelCase (unittest.TestCase ): @slow def UpperCAmelCase_ ( self ) -> int: """simple docstring""" _snake_case : Tuple = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' ) _snake_case : Any = AutoTokenizer.from_pretrained('''google/mt5-small''' ) _snake_case : List[str] = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids _snake_case : Dict = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids _snake_case : Any = shift_tokens_right(lowercase__ , model.config.pad_token_id , model.config.decoder_start_token_id ) _snake_case : Any = model(lowercase__ , decoder_input_ids=lowercase__ ).logits _snake_case : Tuple = optax.softmax_cross_entropy(lowercase__ , onehot(lowercase__ , logits.shape[-1] ) ).mean() _snake_case : Tuple = -(labels.shape[-1] * loss.item()) _snake_case : Union[str, Any] = -84.9_127 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
47
'''simple docstring''' from __future__ import annotations def _a ( lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ): """simple docstring""" if start is None: _snake_case : Optional[Any] = 0 if end is None: _snake_case : Any = len(lowerCAmelCase_ ) - 1 if start >= end: return _snake_case : Optional[Any] = (start + end) // 2 slowsort(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) slowsort(lowerCAmelCase_ , mid + 1 , lowerCAmelCase_ ) if sequence[end] < sequence[mid]: _snake_case , _snake_case : int = sequence[mid], sequence[end] slowsort(lowerCAmelCase_ , lowerCAmelCase_ , end - 1 ) if __name__ == "__main__": from doctest import testmod testmod()
47
1
'''simple docstring''' import html from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...utils import is_bsa_available, logging, requires_backends if is_bsa_available(): import bsa from bsa import BeautifulSoup UpperCAmelCase : Optional[int] = logging.get_logger(__name__) class lowerCamelCase (a__ ): def __init__( self , **lowercase__ ) -> Union[str, Any]: """simple docstring""" requires_backends(self , ['''bs4'''] ) super().__init__(**lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ ) -> Dict: """simple docstring""" _snake_case : Optional[int] = [] _snake_case : List[str] = [] _snake_case : List[Any] = element if element.name else element.parent for parent in child.parents: # type: bs4.element.Tag _snake_case : List[Any] = parent.find_all(child.name , recursive=lowercase__ ) xpath_tags.append(child.name ) xpath_subscripts.append( 0 if 1 == len(lowercase__ ) else next(i for i, s in enumerate(lowercase__ , 1 ) if s is child ) ) _snake_case : Any = parent xpath_tags.reverse() xpath_subscripts.reverse() return xpath_tags, xpath_subscripts def UpperCAmelCase_ ( self , lowercase__ ) -> Optional[int]: """simple docstring""" _snake_case : Optional[int] = BeautifulSoup(lowercase__ , '''html.parser''' ) _snake_case : List[str] = [] _snake_case : Tuple = [] _snake_case : Any = [] for element in html_code.descendants: if type(lowercase__ ) == bsa.element.NavigableString: if type(element.parent ) != bsa.element.Tag: continue _snake_case : str = html.unescape(lowercase__ ).strip() if not text_in_this_tag: continue all_doc_strings.append(lowercase__ ) _snake_case , _snake_case : List[str] = self.xpath_soup(lowercase__ ) stringaxtag_seq.append(lowercase__ ) stringaxsubs_seq.append(lowercase__ ) if len(lowercase__ ) != len(lowercase__ ): raise ValueError('''Number of doc strings and xtags does not correspond''' ) if len(lowercase__ ) != len(lowercase__ ): raise ValueError('''Number of doc strings and xsubs does not correspond''' ) return 
all_doc_strings, stringaxtag_seq, stringaxsubs_seq def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Union[str, Any]: """simple docstring""" _snake_case : Tuple = '''''' for tagname, subs in zip(lowercase__ , lowercase__ ): xpath += F'''/{tagname}''' if subs != 0: xpath += F'''[{subs}]''' return xpath def __call__( self , lowercase__ ) -> BatchFeature: """simple docstring""" _snake_case : Dict = False # Check that strings has a valid type if isinstance(lowercase__ , lowercase__ ): _snake_case : str = True elif isinstance(lowercase__ , (list, tuple) ): if len(lowercase__ ) == 0 or isinstance(html_strings[0] , lowercase__ ): _snake_case : Optional[Any] = True if not valid_strings: raise ValueError( '''HTML strings must of type `str`, `List[str]` (batch of examples), ''' F'''but is of type {type(lowercase__ )}.''' ) _snake_case : Any = bool(isinstance(lowercase__ , (list, tuple) ) and (isinstance(html_strings[0] , lowercase__ )) ) if not is_batched: _snake_case : Optional[Any] = [html_strings] # Get nodes + xpaths _snake_case : Any = [] _snake_case : str = [] for html_string in html_strings: _snake_case , _snake_case , _snake_case : Union[str, Any] = self.get_three_from_single(lowercase__ ) nodes.append(lowercase__ ) _snake_case : Union[str, Any] = [] for node, tag_list, sub_list in zip(lowercase__ , lowercase__ , lowercase__ ): _snake_case : Union[str, Any] = self.construct_xpath(lowercase__ , lowercase__ ) xpath_strings.append(lowercase__ ) xpaths.append(lowercase__ ) # return as Dict _snake_case : Optional[int] = {'''nodes''': nodes, '''xpaths''': xpaths} _snake_case : Tuple = BatchFeature(data=lowercase__ , tensor_type=lowercase__ ) return encoded_inputs
47
'''simple docstring''' import unittest from transformers import is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow if is_flax_available(): import optax from flax.training.common_utils import onehot from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration from transformers.models.ta.modeling_flax_ta import shift_tokens_right @require_torch @require_sentencepiece @require_tokenizers @require_flax class lowerCamelCase (unittest.TestCase ): @slow def UpperCAmelCase_ ( self ) -> int: """simple docstring""" _snake_case : Tuple = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' ) _snake_case : Any = AutoTokenizer.from_pretrained('''google/mt5-small''' ) _snake_case : List[str] = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids _snake_case : Dict = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids _snake_case : Any = shift_tokens_right(lowercase__ , model.config.pad_token_id , model.config.decoder_start_token_id ) _snake_case : Any = model(lowercase__ , decoder_input_ids=lowercase__ ).logits _snake_case : Tuple = optax.softmax_cross_entropy(lowercase__ , onehot(lowercase__ , logits.shape[-1] ) ).mean() _snake_case : Tuple = -(labels.shape[-1] * loss.item()) _snake_case : Union[str, Any] = -84.9_127 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
47
1
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__) UpperCAmelCase : int = { 'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json', 'umberto-commoncrawl-cased-v1': ( 'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json' ), 'umberto-wikipedia-uncased-v1': ( 'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json' ), } class lowerCamelCase (a__ ): _lowercase : Any = """camembert""" def __init__( self , lowercase__=30_522 , lowercase__=768 , lowercase__=12 , lowercase__=12 , lowercase__=3_072 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=512 , lowercase__=2 , lowercase__=0.02 , lowercase__=1E-1_2 , lowercase__=1 , lowercase__=0 , lowercase__=2 , lowercase__="absolute" , lowercase__=True , lowercase__=None , **lowercase__ , ) -> List[Any]: """simple docstring""" super().__init__(pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , **lowercase__ ) _snake_case : Any = vocab_size _snake_case : int = hidden_size _snake_case : Dict = num_hidden_layers _snake_case : Any = num_attention_heads _snake_case : Any = hidden_act _snake_case : int = intermediate_size _snake_case : List[str] = hidden_dropout_prob _snake_case : Dict = attention_probs_dropout_prob _snake_case : int = max_position_embeddings _snake_case : Tuple = type_vocab_size _snake_case : Any = initializer_range _snake_case : Tuple = layer_norm_eps _snake_case : str = position_embedding_type _snake_case : Dict = use_cache _snake_case : Optional[int] = classifier_dropout class lowerCamelCase (a__ ): @property def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": _snake_case : Any = {0: 
'''batch''', 1: '''choice''', 2: '''sequence'''} else: _snake_case : Any = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
47
'''simple docstring''' import pickle import unittest import torch from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils import require_cpu @require_cpu class lowerCamelCase (unittest.TestCase ): def UpperCAmelCase_ ( self ) -> Union[str, Any]: """simple docstring""" _snake_case : Any = torch.nn.Linear(10 , 10 ) _snake_case : Optional[int] = torch.optim.SGD(model.parameters() , 0.1 ) _snake_case : List[str] = Accelerator() _snake_case : Optional[Any] = accelerator.prepare(lowercase__ ) try: pickle.loads(pickle.dumps(lowercase__ ) ) except Exception as e: self.fail(F'''Accelerated optimizer pickling failed with {e}''' ) AcceleratorState._reset_state()
47
1
'''simple docstring''' import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import ops from transformers import GradientAccumulator, create_optimizer @require_tf class lowerCamelCase (unittest.TestCase ): def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ ) -> Dict: """simple docstring""" self.assertEqual(len(lowercase__ ) , len(lowercase__ ) ) for a, b in zip(lowercase__ , lowercase__ ): self.assertAlmostEqual(lowercase__ , lowercase__ , delta=lowercase__ ) def UpperCAmelCase_ ( self ) -> Dict: """simple docstring""" _snake_case : int = GradientAccumulator() accumulator([tf.constant([1.0, 2.0] )] ) accumulator([tf.constant([-2.0, 1.0] )] ) accumulator([tf.constant([-1.0, 2.0] )] ) with self.assertRaises(lowercase__ ): accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] ) self.assertEqual(accumulator.step , 3 ) self.assertEqual(len(accumulator.gradients ) , 1 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 ) def UpperCAmelCase_ ( self ) -> Any: """simple docstring""" _snake_case : Optional[int] = None ops.enable_eager_execution_internal() _snake_case : Union[str, Any] = tf.config.list_physical_devices('''CPU''' ) if len(lowercase__ ) == 1: tf.config.set_logical_device_configuration( physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] ) _snake_case : List[str] = tf.config.list_logical_devices(device_type='''CPU''' ) _snake_case : Optional[int] = tf.distribute.MirroredStrategy(devices=devices[:2] ) with strategy.scope(): _snake_case : Union[str, Any] = GradientAccumulator() _snake_case : Union[str, Any] = 
tf.Variable([4.0, 3.0] ) _snake_case , _snake_case : List[Any] = create_optimizer(5E-5 , 10 , 5 ) _snake_case : Tuple = tf.Variable([0.0, 0.0] , trainable=lowercase__ ) def accumulate_on_replica(lowercase__ ): accumulator([gradient] ) def apply_on_replica(): optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) ) @tf.function def accumulate(lowercase__ , lowercase__ ): with strategy.scope(): _snake_case : Optional[int] = strategy.experimental_local_results(lowercase__ ) local_variables[0].assign(lowercase__ ) local_variables[1].assign(lowercase__ ) strategy.run(lowercase__ , args=(gradient_placeholder,) ) @tf.function def apply_grad(): with strategy.scope(): strategy.run(lowercase__ ) def _check_local_values(lowercase__ , lowercase__ ): _snake_case : Union[str, Any] = strategy.experimental_local_results(accumulator._gradients[0] ) self.assertListAlmostEqual(values[0].value() , lowercase__ , tol=1E-2 ) self.assertListAlmostEqual(values[1].value() , lowercase__ , tol=1E-2 ) accumulate([1.0, 2.0] , [-1.0, 1.0] ) accumulate([3.0, -1.0] , [-1.0, -1.0] ) accumulate([-2.0, 2.0] , [3.0, -2.0] ) self.assertEqual(accumulator.step , 3 ) _check_local_values([2.0, 3.0] , [1.0, -2.0] ) apply_grad() self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) _check_local_values([0.0, 0.0] , [0.0, 0.0] )
47
'''simple docstring''' UpperCAmelCase : Union[str, Any] = tuple[float, float, float] UpperCAmelCase : int = tuple[float, float, float] def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : str = end_pointa[0] - end_pointa[0] _snake_case : Tuple = end_pointa[1] - end_pointa[1] _snake_case : Any = end_pointa[2] - end_pointa[2] return (x, y, z) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Dict = ab[1] * ac[2] - ab[2] * ac[1] # *i _snake_case : List[str] = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j _snake_case : Optional[int] = ab[0] * ac[1] - ab[1] * ac[0] # *k return (x, y, z) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" return tuple(round(lowerCAmelCase_ , lowerCAmelCase_ ) for x in vector ) == (0, 0, 0) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 10 ): """simple docstring""" _snake_case : str = create_vector(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Tuple = create_vector(lowerCAmelCase_ , lowerCAmelCase_ ) return is_zero_vector(get_ad_vectors_cross(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ )
47
1
'''simple docstring''' import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version UpperCAmelCase : Tuple = logging.getLogger(__name__) require_version('pytorch_lightning>=1.0.4') UpperCAmelCase : str = { 'base': AutoModel, 'sequence-classification': AutoModelForSequenceClassification, 'question-answering': AutoModelForQuestionAnswering, 'pretraining': AutoModelForPreTraining, 'token-classification': AutoModelForTokenClassification, 'language-modeling': AutoModelWithLMHead, 'summarization': AutoModelForSeqaSeqLM, 'translation': AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization UpperCAmelCase : Optional[Any] = { 'linear': get_linear_schedule_with_warmup, 'cosine': get_cosine_schedule_with_warmup, 'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup, 'polynomial': get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } UpperCAmelCase : Tuple = sorted(arg_to_scheduler.keys()) UpperCAmelCase : Optional[Any] = '{' + ', '.join(arg_to_scheduler_choices) + '}' class lowerCamelCase (pl.LightningModule ): def __init__( self , lowercase__ , lowercase__=None , lowercase__="base" , lowercase__=None , lowercase__=None , 
lowercase__=None , **lowercase__ , ) -> Optional[int]: """simple docstring""" super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(lowercase__ ) _snake_case : Union[str, Any] = 0 _snake_case : int = Path(self.hparams.output_dir ) _snake_case : int = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: _snake_case : Tuple = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'''num_labels''': num_labels} if num_labels is not None else {}) , cache_dir=lowercase__ , **lowercase__ , ) else: _snake_case : PretrainedConfig = config _snake_case : Optional[Any] = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''') for p in extra_model_params: if getattr(self.hparams , lowercase__ , lowercase__ ): assert hasattr(self.config , lowercase__ ), F'''model config doesn\'t have a `{p}` attribute''' setattr(self.config , lowercase__ , getattr(self.hparams , lowercase__ ) ) if tokenizer is None: _snake_case : Optional[int] = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=lowercase__ , ) else: _snake_case : PreTrainedTokenizer = tokenizer _snake_case : Any = MODEL_MODES[mode] if model is None: _snake_case : List[Any] = self.model_type.from_pretrained( self.hparams.model_name_or_path , from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=lowercase__ , ) else: _snake_case : Optional[Any] = model def UpperCAmelCase_ ( self , *lowercase__ , **lowercase__ ) -> List[str]: """simple docstring""" _snake_case : Dict = self.model_type.from_pretrained(*lowercase__ , **lowercase__ ) def UpperCAmelCase_ ( self ) -> List[Any]: """simple docstring""" _snake_case : Optional[int] = 
arg_to_scheduler[self.hparams.lr_scheduler] _snake_case : Optional[int] = get_schedule_func( self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() ) _snake_case : str = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1} return scheduler def UpperCAmelCase_ ( self ) -> int: """simple docstring""" _snake_case : Any = self.model _snake_case : List[Any] = ['''bias''', '''LayerNorm.weight'''] _snake_case : List[str] = [ { '''params''': [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check this named paramters '''weight_decay''': self.hparams.weight_decay, }, { '''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0, }, ] if self.hparams.adafactor: _snake_case : Any = Adafactor( lowercase__ , lr=self.hparams.learning_rate , scale_parameter=lowercase__ , relative_step=lowercase__ ) else: _snake_case : List[str] = AdamW( lowercase__ , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon ) _snake_case : List[str] = optimizer _snake_case : Any = self.get_lr_scheduler() return [optimizer], [scheduler] def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Any: """simple docstring""" return self.validation_step(lowercase__ , lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ ) -> Tuple: """simple docstring""" return self.validation_end(lowercase__ ) def UpperCAmelCase_ ( self ) -> int: """simple docstring""" _snake_case : Any = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores _snake_case : Optional[int] = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def UpperCAmelCase_ ( self , lowercase__ ) -> Any: """simple docstring""" if stage == "test": _snake_case : Any = len(self.test_dataloader().dataset ) else: _snake_case : Dict = self.get_dataloader('''train''' , 
self.hparams.train_batch_size , shuffle=lowercase__ ) _snake_case : Optional[int] = len(self.train_dataloader().dataset ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ = False ) -> str: """simple docstring""" raise NotImplementedError('''You must implement this for your task''' ) def UpperCAmelCase_ ( self ) -> Optional[int]: """simple docstring""" return self.train_loader def UpperCAmelCase_ ( self ) -> Dict: """simple docstring""" return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=lowercase__ ) def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ ) -> Optional[int]: """simple docstring""" return os.path.join( self.hparams.data_dir , '''cached_{}_{}_{}'''.format( lowercase__ , list(filter(lowercase__ , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , ) @pl.utilities.rank_zero_only def UpperCAmelCase_ ( self , lowercase__ ) -> None: """simple docstring""" _snake_case : Dict = self.output_dir.joinpath('''best_tfmr''' ) _snake_case : Tuple = self.step_count self.model.save_pretrained(lowercase__ ) self.tokenizer.save_pretrained(lowercase__ ) @staticmethod def UpperCAmelCase_ ( lowercase__ , lowercase__ ) -> Tuple: """simple docstring""" parser.add_argument( '''--model_name_or_path''' , default=lowercase__ , type=lowercase__ , required=lowercase__ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--config_name''' , default='''''' , type=lowercase__ , help='''Pretrained config name or path if not the same as model_name''' ) parser.add_argument( '''--tokenizer_name''' , default=lowercase__ , type=lowercase__ , help='''Pretrained tokenizer name or path if not the same as model_name''' , ) parser.add_argument( '''--cache_dir''' , default=str(Path(lowercase__ 
).parent / '''test_run''' / '''cache''' ) , type=lowercase__ , help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , ) parser.add_argument( '''--encoder_layerdrop''' , type=lowercase__ , help='''Encoder layer dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--decoder_layerdrop''' , type=lowercase__ , help='''Decoder layer dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--dropout''' , type=lowercase__ , help='''Dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--attention_dropout''' , type=lowercase__ , help='''Attention dropout probability (Optional). Goes into model.config''' , ) parser.add_argument('''--learning_rate''' , default=5E-5 , type=lowercase__ , help='''The initial learning rate for Adam.''' ) parser.add_argument( '''--lr_scheduler''' , default='''linear''' , choices=lowercase__ , metavar=lowercase__ , type=lowercase__ , help='''Learning rate scheduler''' , ) parser.add_argument('''--weight_decay''' , default=0.0 , type=lowercase__ , help='''Weight decay if we apply some.''' ) parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=lowercase__ , help='''Epsilon for Adam optimizer.''' ) parser.add_argument('''--warmup_steps''' , default=0 , type=lowercase__ , help='''Linear warmup over warmup_steps.''' ) parser.add_argument('''--num_workers''' , default=4 , type=lowercase__ , help='''kwarg passed to DataLoader''' ) parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=lowercase__ ) parser.add_argument('''--train_batch_size''' , default=32 , type=lowercase__ ) parser.add_argument('''--eval_batch_size''' , default=32 , type=lowercase__ ) parser.add_argument('''--adafactor''' , action='''store_true''' ) class lowerCamelCase (pl.Callback ): def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> str: """simple docstring""" if ( trainer.is_global_zero and 
trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. class lowerCamelCase (pl.Callback ): def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> List[str]: """simple docstring""" for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(lowercase__ ) class lowerCamelCase (pl.Callback ): def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Any: """simple docstring""" _snake_case : Any = trainer.lr_schedulers[0]['''scheduler'''] _snake_case : Optional[int] = {F'''lr_group_{i}''': lr for i, lr in enumerate(lr_scheduler.get_lr() )} pl_module.logger.log_metrics(lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> List[str]: """simple docstring""" rank_zero_info('''***** Validation results *****''' ) _snake_case : Dict = trainer.callback_metrics # Log results for key in sorted(lowercase__ ): if key not in ["log", "progress_bar"]: rank_zero_info('''{} = {}\n'''.format(lowercase__ , str(metrics[key] ) ) ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Dict: """simple docstring""" rank_zero_info('''***** Test results *****''' ) _snake_case : Dict = trainer.callback_metrics # Log and save results to file _snake_case : str = os.path.join(pl_module.hparams.output_dir , '''test_results.txt''' ) with open(lowercase__ , '''w''' ) as writer: for key in sorted(lowercase__ ): if key not in ["log", "progress_bar"]: rank_zero_info('''{} = {}\n'''.format(lowercase__ , str(metrics[key] ) ) ) writer.write('''{} = {}\n'''.format(lowercase__ , str(metrics[key] ) ) ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" parser.add_argument( '''--output_dir''' , default=str(Path(lowerCAmelCase_ ).parent / '''test_run''' / '''model_checkpoints''' ) , type=lowerCAmelCase_ , help='''The output directory where the model predictions and 
checkpoints will be written.''' , ) parser.add_argument( '''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , ) parser.add_argument( '''--fp16_opt_level''' , type=lowerCAmelCase_ , default='''O2''' , help=( '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].''' '''See details at https://nvidia.github.io/apex/amp.html''' ) , ) parser.add_argument('''--n_tpu_cores''' , dest='''tpu_cores''' , type=lowerCAmelCase_ ) parser.add_argument('''--max_grad_norm''' , dest='''gradient_clip_val''' , default=1.0 , type=lowerCAmelCase_ , help='''Max gradient norm''' ) parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' ) parser.add_argument('''--do_predict''' , action='''store_true''' , help='''Whether to run predictions on the test set.''' ) parser.add_argument( '''--gradient_accumulation_steps''' , dest='''accumulate_grad_batches''' , type=lowerCAmelCase_ , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , ) parser.add_argument('''--seed''' , type=lowerCAmelCase_ , default=42 , help='''random seed for initialization''' ) parser.add_argument( '''--data_dir''' , default=str(Path(lowerCAmelCase_ ).parent / '''test_run''' / '''dummy-train-data''' ) , type=lowerCAmelCase_ , help='''The input data dir. 
Should contain the training files for the CoNLL-2003 NER task.''' , ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=True , lowerCAmelCase_=[] , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ , ): """simple docstring""" pl.seed_everything(args.seed ) # init model _snake_case : Union[str, Any] = Path(model.hparams.output_dir ) odir.mkdir(exist_ok=lowerCAmelCase_ ) # add custom checkpoints if checkpoint_callback is None: _snake_case : Any = pl.callbacks.ModelCheckpoint( filepath=args.output_dir , prefix='''checkpoint''' , monitor='''val_loss''' , mode='''min''' , save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(lowerCAmelCase_ ) if logging_callback is None: _snake_case : str = LoggingCallback() _snake_case : Tuple = {} if args.fpaa: _snake_case : Union[str, Any] = 16 if args.gpus > 1: _snake_case : Optional[Any] = '''auto''' _snake_case : Tuple = '''ddp''' _snake_case : Optional[Any] = args.accumulate_grad_batches _snake_case : Tuple = None _snake_case : str = '''auto''' _snake_case : int = pl.Trainer.from_argparse_args( lowerCAmelCase_ , weights_summary=lowerCAmelCase_ , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=lowerCAmelCase_ , val_check_interval=1 , num_sanity_val_steps=2 , **lowerCAmelCase_ , ) if args.do_train: trainer.fit(lowerCAmelCase_ ) else: print('''RAG modeling tests with new set functions successfuly executed!''' ) return trainer
47
"""Compute GPT-2 attention-head entropy/importance, then optionally mask and prune heads."""
import argparse
import logging
import os
from datetime import datetime

import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel  # was mangled "GPTaLMHeadModel"

logger = logging.getLogger(__name__)


def save_model(model, dirpath):
    """Save ``model`` into ``dirpath``, replacing any stale checkpoint files first."""
    if os.path.exists(dirpath):
        # Remove previous checkpoint files so save_pretrained writes a clean copy.
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)


def entropy(p, unlogit=False):
    """Return the entropy of distribution ``p`` along the last axis.

    When ``unlogit`` is True, ``p`` is squared first.
    """
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    # 0 * log(0) is defined as 0 — zero out the NaNs produced by log(0).
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)


def print_2d_tensor(tensor):
    """Log a 2D tensor as a tab-separated table, one row per layer."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))


def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Run the model over ``eval_dataloader`` and accumulate per-head statistics.

    Importance is the accumulated |d loss / d head_mask|; entropy is the
    per-head attention entropy. Returns (attn_entropy, head_importance, total_loss).
    """
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    total_loss = 0.0
    tot_tokens = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
        logger.info("Head ranked by importance scores")
        head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
        # NOTE(review): the original scatter target was lost in mangling; restored as
        # rank-by-descending-importance — confirm against the upstream script.
        head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
            head_importance.numel(), device=args.device
        )
        head_ranks = head_ranks.view_as(head_importance)
        print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss


def mask_heads(args, model, eval_dataloader):
    """Iteratively zero the least-important heads until the score drops below threshold."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())
    return head_mask


def prune_heads(args, model, eval_dataloader, head_mask):
    """Physically prune the heads zeroed in ``head_mask`` and compare score/speed."""
    # Score with the head mask applied (heads still present, just masked).
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }
    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            # squeeze() collapses a single head index to a scalar — re-wrap it.
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    # Score again with the heads actually removed.
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)


def main():
    """Entry point: parse args, load GPT-2, score heads, optionally mask and prune."""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),  # was mangled "np.intaa"
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
47
1
"""Simple 3D point utilities: perspective projection to 2D and rotation about an axis."""
from __future__ import annotations

import math

__version__ = "2020.9.26"
__author__ = "xcodz-dot, cclaus, dhruvmanila"


def convert_to_ad(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    """Project the 3D point (x, y, z) onto a 2D plane with a perspective divide.

    Args:
        x, y, z: point coordinates.
        scale: multiplier applied after the perspective divide.
        distance: distance of the projection plane from the origin.

    Returns:
        The projected (x, y) pair.

    Raises:
        TypeError: if any argument is not a float or int.
    """
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    """Rotate the point (x, y, z) about ``axis`` ('x', 'y' or 'z') by ``angle``.

    The angle is converted with the original (non-standard) formula
    ``(angle % 360) / 450 * 180 / pi`` before the trigonometry is applied.

    Raises:
        TypeError: if ``axis`` is not a str, or a coordinate/angle is not numeric.
        ValueError: if ``axis`` is not one of 'x', 'y', 'z'.
    """
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(F"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
    print(F"""{rotate(1.0, 2.0, 3.0, "y", 90.0) = }""")
47
'''simple docstring''' def _a ( lowerCAmelCase_ ): """simple docstring""" if n == 1 or not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): return 0 elif n == 2: return 1 else: _snake_case : Union[str, Any] = [0, 1] for i in range(2 , n + 1 ): sequence.append(sequence[i - 1] + sequence[i - 2] ) return sequence[n] def _a ( lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[int] = 0 _snake_case : int = 2 while digits < n: index += 1 _snake_case : Tuple = len(str(fibonacci(lowerCAmelCase_ ) ) ) return index def _a ( lowerCAmelCase_ = 1_000 ): """simple docstring""" return fibonacci_digits_index(lowerCAmelCase_ ) if __name__ == "__main__": print(solution(int(str(input()).strip())))
47
1
'''simple docstring''' def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : int = (boundary[1] - boundary[0]) / steps _snake_case : Optional[Any] = boundary[0] _snake_case : List[Any] = boundary[1] _snake_case : List[str] = make_points(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Optional[int] = 0.0 y += (h / 2.0) * f(lowerCAmelCase_ ) for i in x_i: # print(i) y += h * f(lowerCAmelCase_ ) y += (h / 2.0) * f(lowerCAmelCase_ ) return y def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[int] = a + h while x < (b - h): yield x _snake_case : str = x + h def _a ( lowerCAmelCase_ ): # enter your function here """simple docstring""" _snake_case : Tuple = (x - 0) * (x - 0) return y def _a ( ): """simple docstring""" _snake_case : str = 0.0 # Lower bound of integration _snake_case : Optional[int] = 1.0 # Upper bound of integration _snake_case : str = 10.0 # define number of steps or resolution _snake_case : int = [a, b] # define boundary of integration _snake_case : Any = method_a(lowerCAmelCase_ , lowerCAmelCase_ ) print(f'''y = {y}''' ) if __name__ == "__main__": main()
47
"""A generic LRU cache backed by a doubly linked list, plus a memoizing decorator."""
from __future__ import annotations

from collections.abc import Callable
from typing import Generic, TypeVar

T = TypeVar("T")
U = TypeVar("U")


class DoubleLinkedListNode(Generic[T, U]):
    """One node of the doubly linked list: a key/value pair with prev/next links."""

    def __init__(self, key: T | None, val: U | None) -> None:
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


class DoubleLinkedList(Generic[T, U]):
    """Doubly linked list with sentinel head/rear; most-recently-used sits before rear."""

    def __init__(self) -> None:
        # Sentinel nodes: real entries live strictly between head and rear.
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        """Insert ``node`` just before the rear sentinel (most-recently-used slot)."""
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have a non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        """Unlink ``node`` and return it, or return None if it is not linked."""
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    """Least-recently-used cache: O(1) get/put with hit/miss bookkeeping."""

    # One cache instance per decorated function (shared across the class).
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int) -> None:
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        """Return the cached value for ``key`` (marking it most recent), else None."""
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        """Insert or update ``key``; evict the least-recently-used entry if full."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert self.list.remove(first_node) is not None  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        """Return a decorator memoizing a one-argument function in an LRUCache of ``size``."""

        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner


# Backward-compatible alias: the original module exposed the cache class under this name.
lowerCamelCase = LRUCache


if __name__ == "__main__":
    import doctest

    doctest.testmod()
47
1
'''simple docstring''' from __future__ import annotations from bisect import bisect_left from functools import total_ordering from heapq import merge @total_ordering class lowerCamelCase (a__ ): def __lt__( self , lowercase__ ) -> List[Any]: """simple docstring""" return self[-1] < other[-1] def __eq__( self , lowercase__ ) -> Any: """simple docstring""" return self[-1] == other[-1] def _a ( lowerCAmelCase_ ): """simple docstring""" _snake_case : list[Stack] = [] # sort into stacks for element in collection: _snake_case : List[Any] = Stack([element] ) _snake_case : Union[str, Any] = bisect_left(lowerCAmelCase_ , lowerCAmelCase_ ) if i != len(lowerCAmelCase_ ): stacks[i].append(lowerCAmelCase_ ) else: stacks.append(lowerCAmelCase_ ) # use a heap-based merge to merge stack efficiently _snake_case : int = merge(*(reversed(lowerCAmelCase_ ) for stack in stacks) ) return collection if __name__ == "__main__": UpperCAmelCase : Union[str, Any] = input('Enter numbers separated by a comma:\n').strip() UpperCAmelCase : List[Any] = [int(item) for item in user_input.split(',')] print(patience_sort(unsorted))
47
"""Deduplicate identical initializer tensors in an ONNX model.

Finds pairs of byte-equal initializers, removes the duplicates, rewires every
node input (including ``If``/``Loop`` subgraphs) to the surviving tensor, and
saves the result as ``optimized_<name>`` next to the input file.
"""
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    """Return True if two TensorProtos are equal, ignoring their names."""
    name_a = a.name
    name_b = b.name
    # Blank the names so the protobuf equality compares only the payload.
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    """Replace every input called ``name`` on ``node_proto`` with ``new_name``.

    Recurses into the subgraphs of ``If`` (then/else) and ``Loop`` (body).
    """
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            # insert+pop rewrites in place while keeping input order.
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    """Apply the input rename to every node of ``graph_proto``."""
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    """Drop duplicate initializers (index ``i``) and rewire users to ``ref_i``."""
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """Deduplicate initializers of ``onnx_file_path`` and save an optimized copy.

    Returns the path of the saved ``optimized_<basename>`` model.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}  # surviving name -> list of removed duplicate names (for reporting)
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                # Element sizes: FLOAT(1)=4, INT32(6)=4, INT64(7)/DOUBLE(11)=8 bytes.
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print('unexpected data type: ', dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print('total reduced size: ', total_reduced_size / 1_024 / 1_024 / 1_024, 'GB')
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    new_model_name = 'optimized_' + model_file_name
    new_model = os.path.join(model_file_folder, new_model_name)
    onnx.save(model, new_model)
    return new_model


# Backward-compatible alias for the previous (obfuscated) entry-point name.
_a = remove_dup_initializers
47
1
"""Regression tests for the hub file-caching utilities
(``cached_file``, ``get_file_from_repo``, ``has_file``)."""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from requests.exceptions import HTTPError

from transformers.utils import (
    CONFIG_NAME,
    FLAX_WEIGHTS_NAME,
    TF2_WEIGHTS_NAME,
    TRANSFORMERS_CACHE,
    WEIGHTS_NAME,
    cached_file,
    get_file_from_repo,
    has_file,
)

# NOTE(review): the three module constants below all share one (obfuscated)
# name, so only the last assignment survives at runtime — confirm intent.
UpperCAmelCase : str = 'hf-internal-testing/tiny-random-bert'
UpperCAmelCase : str = os.path.join(TRANSFORMERS_CACHE, 'models--hf-internal-testing--tiny-random-bert')
UpperCAmelCase : str = '9b8c223d42b2188cb49d29af482996f9d0f3e5a6'


class lowerCamelCase (unittest.TestCase ):
    # NOTE(review): obfuscated test class — every method shares the name
    # `UpperCAmelCase_` (only the last definition is registered on the class)
    # and bodies reference `lowercase__`, which is never bound here.

    def UpperCAmelCase_ ( self ) -> None:
        """Download a file; check cache layout, cache hits and revision pinning."""
        _snake_case : Dict = cached_file(lowercase__ , lowercase__ )
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(lowercase__ ) )
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(lowercase__ , lowercase__ ) ) )
        with open(os.path.join(lowercase__ , '''refs''' , '''main''' ) ) as f:
            _snake_case : List[str] = f.read()
        self.assertEqual(lowercase__ , os.path.join(lowercase__ , '''snapshots''' , lowercase__ , lowercase__ ) )
        self.assertTrue(os.path.isfile(lowercase__ ) )
        # File is cached at the same place the second time.
        _snake_case : Optional[int] = cached_file(lowercase__ , lowercase__ )
        self.assertEqual(lowercase__ , lowercase__ )
        # Using a specific revision to test the full commit hash.
        _snake_case : List[str] = cached_file(lowercase__ , lowercase__ , revision='''9b8c223''' )
        self.assertEqual(lowercase__ , os.path.join(lowercase__ , '''snapshots''' , lowercase__ , lowercase__ ) )

    def UpperCAmelCase_ ( self ) -> None:
        """Invalid repo ids, revisions and filenames raise with clear messages."""
        with self.assertRaisesRegex(lowercase__ , '''is not a valid model identifier''' ):
            _snake_case : Any = cached_file('''tiny-random-bert''' , lowercase__ )
        with self.assertRaisesRegex(lowercase__ , '''is not a valid git identifier''' ):
            _snake_case : Tuple = cached_file(lowercase__ , lowercase__ , revision='''aaaa''' )
        with self.assertRaisesRegex(lowercase__ , '''does not appear to have a file named''' ):
            _snake_case : Tuple = cached_file(lowercase__ , '''conf''' )

    def UpperCAmelCase_ ( self ) -> None:
        """Missing entries: .no_exist marker, soft-failure flags, mocked HTTP 500."""
        with self.assertRaisesRegex(lowercase__ , '''does not appear to have a file named''' ):
            _snake_case : List[Any] = cached_file(lowercase__ , '''conf''' )
        with open(os.path.join(lowercase__ , '''refs''' , '''main''' ) ) as f:
            _snake_case : Optional[Any] = f.read()
        self.assertTrue(os.path.isfile(os.path.join(lowercase__ , '''.no_exist''' , lowercase__ , '''conf''' ) ) )
        _snake_case : Any = cached_file(lowercase__ , '''conf''' , _raise_exceptions_for_missing_entries=lowercase__ )
        self.assertIsNone(lowercase__ )
        _snake_case : Union[str, Any] = cached_file(lowercase__ , '''conf''' , local_files_only=lowercase__ , _raise_exceptions_for_missing_entries=lowercase__ )
        self.assertIsNone(lowercase__ )
        _snake_case : List[Any] = mock.Mock()
        _snake_case : Union[str, Any] = 500
        _snake_case : Any = {}
        _snake_case : Dict = HTTPError
        _snake_case : Tuple = {}
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('''requests.Session.request''' , return_value=lowercase__ ) as mock_head:
            _snake_case : Dict = cached_file(lowercase__ , '''conf''' , _raise_exceptions_for_connection_errors=lowercase__ )
            self.assertIsNone(lowercase__ )
            # This check we did call the fake head request
            mock_head.assert_called()

    def UpperCAmelCase_ ( self ) -> None:
        """``has_file`` finds framework weights only for the frameworks present."""
        self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , lowercase__ ) )
        self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , lowercase__ ) )
        self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , lowercase__ ) )

    def UpperCAmelCase_ ( self ) -> None:
        """``get_file_from_repo``: missing files return None; bad repo/revision raise."""
        self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) )
        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(lowercase__ , '''is not a valid model identifier''' ):
            get_file_from_repo('''bert-base-case''' , lowercase__ )
        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(lowercase__ , '''is not a valid git identifier''' ):
            get_file_from_repo('''bert-base-cased''' , lowercase__ , revision='''ahaha''' )
        _snake_case : Any = get_file_from_repo('''bert-base-cased''' , lowercase__ )
        # The name is the cached name which is not very easy to test, so instead we load the content.
        _snake_case : Optional[Any] = json.loads(open(lowercase__ , '''r''' ).read() )
        self.assertEqual(config['''hidden_size'''] , 768 )

    def UpperCAmelCase_ ( self ) -> None:
        """``get_file_from_repo`` also works on a plain local directory."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            _snake_case : Dict = Path(lowercase__ ) / '''a.txt'''
            filename.touch()
            self.assertEqual(get_file_from_repo(lowercase__ , '''a.txt''' ) , str(lowercase__ ) )
            self.assertIsNone(get_file_from_repo(lowercase__ , '''b.txt''' ) )
47
"""Lazy import structure for the PEGASUS-X sub-package.

Follows the standard ``transformers`` pattern: declare the public API in
``_import_structure`` and defer the heavy imports to first attribute access
via ``_LazyModule``.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Map of sub-module name -> public names it exports; consumed by _LazyModule.
_import_structure = {
    'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only exported when torch is installed.
    _import_structure['modeling_pegasus_x'] = [
        'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PegasusXForConditionalGeneration',
        'PegasusXModel',
        'PegasusXPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
47
1
"""Unit tests for ``DDIMParallelScheduler``: config permutations, variance
values, batched no-noise stepping, and full denoising loops."""
import torch

from diffusers import DDIMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class lowerCamelCase (a__ ):
    # NOTE(review): obfuscated test class — all methods share the name
    # `UpperCAmelCase_` (so only the last definition is registered), both class
    # attributes use `_lowercase`, and several bodies assign results to
    # `_snake_case` but then read the original upstream names
    # (e.g. `config`, `scheduler`, `sample`, `result_sum`) — confirm intent.
    _lowercase : tuple = (DDIMParallelScheduler,)
    _lowercase : tuple = (("""eta""", 0.0), ("""num_inference_steps""", 50))

    def UpperCAmelCase_ ( self , **lowercase__ ):
        """Build a default scheduler config, overridable via kwargs."""
        _snake_case : str = {
            '''num_train_timesteps''': 1_000,
            '''beta_start''': 0.0_001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''clip_sample''': True,
        }
        config.update(**lowercase__ )  # NOTE(review): `config` is never bound here
        return config

    def UpperCAmelCase_ ( self , **lowercase__ ):
        """Run a full denoising loop and return the final sample."""
        _snake_case : List[Any] = self.scheduler_classes[0]
        _snake_case : List[Any] = self.get_scheduler_config(**lowercase__ )
        _snake_case : str = scheduler_class(**lowercase__ )
        _snake_case , _snake_case : Union[str, Any] = 10, 0.0
        _snake_case : List[str] = self.dummy_model()
        _snake_case : Tuple = self.dummy_sample_deter
        scheduler.set_timesteps(lowercase__ )
        for t in scheduler.timesteps:
            _snake_case : Union[str, Any] = model(lowercase__ , lowercase__ )
            _snake_case : Any = scheduler.step(lowercase__ , lowercase__ , lowercase__ , lowercase__ ).prev_sample
        return sample

    def UpperCAmelCase_ ( self ) -> None:
        """Config sweep over num_train_timesteps."""
        for timesteps in [100, 500, 1_000]:
            self.check_over_configs(num_train_timesteps=lowercase__ )

    def UpperCAmelCase_ ( self ) -> None:
        """Config sweep over steps_offset; timestep values with offset=1."""
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=lowercase__ )
        _snake_case : int = self.scheduler_classes[0]
        _snake_case : int = self.get_scheduler_config(steps_offset=1 )
        _snake_case : Optional[int] = scheduler_class(**lowercase__ )
        scheduler.set_timesteps(5 )
        assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )

    def UpperCAmelCase_ ( self ) -> None:
        """Config sweep over (beta_start, beta_end) pairs."""
        for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=lowercase__ , beta_end=lowercase__ )

    def UpperCAmelCase_ ( self ) -> None:
        """Config sweep over beta_schedule."""
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=lowercase__ )

    def UpperCAmelCase_ ( self ) -> None:
        """Config sweep over prediction_type."""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=lowercase__ )

    def UpperCAmelCase_ ( self ) -> None:
        """Config sweep over clip_sample."""
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=lowercase__ )

    def UpperCAmelCase_ ( self ) -> None:
        """Config sweep over timestep_spacing."""
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=lowercase__ )

    def UpperCAmelCase_ ( self ) -> None:
        """Config sweep over rescale_betas_zero_snr."""
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=lowercase__ )

    def UpperCAmelCase_ ( self ) -> None:
        """Config sweep over thresholding / sample_max_value / prediction_type."""
        self.check_over_configs(thresholding=lowercase__ )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=lowercase__ , prediction_type=lowercase__ , sample_max_value=lowercase__ , )

    def UpperCAmelCase_ ( self ) -> None:
        """Forward sweep over single time_step values."""
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=lowercase__ )

    def UpperCAmelCase_ ( self ) -> None:
        """Forward sweep over (time_step, num_inference_steps) pairs."""
        for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
            self.check_over_forward(time_step=lowercase__ , num_inference_steps=lowercase__ )

    def UpperCAmelCase_ ( self ) -> None:
        """Forward sweep over (time_step, eta) pairs."""
        for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
            self.check_over_forward(time_step=lowercase__ , eta=lowercase__ )

    def UpperCAmelCase_ ( self ) -> None:
        """Golden values for the scheduler's internal _get_variance."""
        _snake_case : List[Any] = self.scheduler_classes[0]
        _snake_case : List[Any] = self.get_scheduler_config()
        _snake_case : Union[str, Any] = scheduler_class(**lowercase__ )
        assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.14_771 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.32_460 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.00_979 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1E-5

    def UpperCAmelCase_ ( self ) -> None:
        """Batched no-noise stepping over three stacked samples; golden sums."""
        _snake_case : Any = self.scheduler_classes[0]
        _snake_case : Optional[int] = self.get_scheduler_config()
        _snake_case : Optional[int] = scheduler_class(**lowercase__ )
        _snake_case , _snake_case : Tuple = 10, 0.0
        scheduler.set_timesteps(lowercase__ )
        _snake_case : Tuple = self.dummy_model()
        _snake_case : Any = self.dummy_sample_deter
        _snake_case : Tuple = self.dummy_sample_deter + 0.1
        _snake_case : Optional[Any] = self.dummy_sample_deter - 0.1
        _snake_case : Optional[int] = samplea.shape[0]
        _snake_case : List[str] = torch.stack([samplea, samplea, samplea] , dim=0 )
        _snake_case : Optional[Any] = torch.arange(lowercase__ )[0:3, None].repeat(1 , lowercase__ )
        _snake_case : List[str] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
        _snake_case : Dict = scheduler.batch_step_no_noise(lowercase__ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , lowercase__ )
        _snake_case : List[Any] = torch.sum(torch.abs(lowercase__ ) )
        _snake_case : Optional[int] = torch.mean(torch.abs(lowercase__ ) )
        assert abs(result_sum.item() - 1_147.7_904 ) < 1E-2
        assert abs(result_mean.item() - 0.4_982 ) < 1E-3

    def UpperCAmelCase_ ( self ) -> None:
        """Full loop with default config; golden sum/mean."""
        _snake_case : List[Any] = self.full_loop()
        _snake_case : int = torch.sum(torch.abs(lowercase__ ) )
        _snake_case : int = torch.mean(torch.abs(lowercase__ ) )
        assert abs(result_sum.item() - 172.0_067 ) < 1E-2
        assert abs(result_mean.item() - 0.223_967 ) < 1E-3

    def UpperCAmelCase_ ( self ) -> None:
        """Full loop with v_prediction; golden sum/mean."""
        _snake_case : Tuple = self.full_loop(prediction_type='''v_prediction''' )
        _snake_case : int = torch.sum(torch.abs(lowercase__ ) )
        _snake_case : Optional[Any] = torch.mean(torch.abs(lowercase__ ) )
        assert abs(result_sum.item() - 52.5_302 ) < 1E-2
        assert abs(result_mean.item() - 0.0_684 ) < 1E-3

    def UpperCAmelCase_ ( self ) -> None:
        """Full loop with set_alpha_to_one and beta_start=0.01; golden sum/mean."""
        _snake_case : Tuple = self.full_loop(set_alpha_to_one=lowercase__ , beta_start=0.01 )
        _snake_case : str = torch.sum(torch.abs(lowercase__ ) )
        _snake_case : Optional[Any] = torch.mean(torch.abs(lowercase__ ) )
        assert abs(result_sum.item() - 149.8_295 ) < 1E-2
        assert abs(result_mean.item() - 0.1_951 ) < 1E-3

    def UpperCAmelCase_ ( self ) -> None:
        """Full loop variant with set_alpha_to_one and beta_start=0.01; golden sum/mean."""
        _snake_case : Optional[Any] = self.full_loop(set_alpha_to_one=lowercase__ , beta_start=0.01 )
        _snake_case : Dict = torch.sum(torch.abs(lowercase__ ) )
        _snake_case : Optional[Any] = torch.mean(torch.abs(lowercase__ ) )
        assert abs(result_sum.item() - 149.0_784 ) < 1E-2
        assert abs(result_mean.item() - 0.1_941 ) < 1E-3
47
"""Image processor that rounds image size down to a multiple of
``size_divisor`` and rescales pixel values to [0, 1] (GLPN-style)."""
from typing import List, Optional, Union

import numpy as np
import PIL.Image

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    PILImageResampling,
    get_image_size,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging

UpperCAmelCase = logging.get_logger(__name__)


class lowerCamelCase(BaseImageProcessor):
    """Constructs the image processor.

    Args:
        do_resize: whether to round each image's (height, width) down to the
            nearest multiple of ``size_divisor`` and resize.
        size_divisor: the divisor used when resizing.
        resample: PIL resampling filter used for resizing.
        do_rescale: whether to multiply pixel values by 1/255.
    """

    _lowercase: List[str] = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size_divisor=32,
        resample=PILImageResampling.BILINEAR,
        do_rescale=True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image, size_divisor, resample, data_format=None, **kwargs) -> np.ndarray:
        """Resize ``image`` so height and width are multiples of ``size_divisor``."""
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor.
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        # Bare `resize` resolves to the module-level transform, not this method.
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        """Multiply pixel values of ``image`` by ``scale`` (e.g. 1/255)."""
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size_divisor=None,
        resample=None,
        do_rescale=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Preprocess one image or a batch: resize, rescale, channel-format.

        Per-call arguments override the defaults stored on the instance.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError('size_divisor is required for resizing')
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError('Invalid image(s)')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]
        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
47
1
"""Mosaic data augmentation: stitch four random images into one mosaic and
remap their YOLO-format annotations accordingly."""
import glob
import os
import random
from string import ascii_lowercase, digits

import cva  # NOTE(review): presumably OpenCV ('cv2') under a renamed import — confirm
import numpy as np

# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)
FILTER_TINY_SCALE = 1 / 100  # if height or width lower than this scale, drop it.
LABEL_DIR = ''
IMG_DIR = ''
OUTPUT_DIR = ''
NUMBER_IMAGES = 250


def main() -> None:
    """Create NUMBER_IMAGES mosaics from the dataset and save image + labels."""
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit('.', 1)[0]
        file_root = f'{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'
        cva.imwrite(f'{file_root}.jpg', new_image, [cva.IMWRITE_JPEG_QUALITY, 85])
        print(f'Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}')
        annos_list = []
        for anno in new_annos:
            # Convert corner boxes back to YOLO (center, size) format.
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f'{anno[0]} {x_center} {y_center} {width} {height}'
            annos_list.append(obj)
        with open(f'{file_root}.txt', 'w') as outfile:
            outfile.write('\n'.join(line for line in annos_list))


def get_dataset(label_dir, img_dir):
    """Read YOLO label files; return (img_paths, labels) with corner boxes.

    Each label entry is [class_id, xmin, ymin, xmax, ymax] in relative coords.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, '*.txt')):
        label_name = label_file.split(os.sep)[-1].rsplit('.', 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f'{label_name}.jpg')
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('\n').split(' ')
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2
            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(all_img_list, all_annos, idxs, output_size, scale_range, filter_scale=0.0):
    """Stitch the 4 images at ``idxs`` into one mosaic and remap their boxes.

    Returns (output_img, new_anno, first_image_path).  Boxes smaller than
    ``filter_scale`` on either side are dropped.
    """
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cva.imread(path)
        if i == 0:  # top-left
            img = cva.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cva.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cva.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cva.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
    return output_img, new_anno, path_list[0]


def random_chars(number_char):
    """Generate a random lowercase-alphanumeric string of length ``number_char``."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print('DONE ✅')
47
'''simple docstring''' from __future__ import annotations import unittest from transformers import LEDConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFLEDForConditionalGeneration, TFLEDModel @require_tf class lowerCamelCase : _lowercase : Any = LEDConfig _lowercase : Any = {} _lowercase : Optional[Any] = """gelu""" def __init__( self , lowercase__ , lowercase__=13 , lowercase__=7 , lowercase__=True , lowercase__=False , lowercase__=99 , lowercase__=32 , lowercase__=2 , lowercase__=4 , lowercase__=37 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=20 , lowercase__=2 , lowercase__=1 , lowercase__=0 , lowercase__=4 , ) -> Any: """simple docstring""" _snake_case : Dict = parent _snake_case : Any = batch_size _snake_case : List[str] = seq_length _snake_case : Union[str, Any] = is_training _snake_case : Tuple = use_labels _snake_case : int = vocab_size _snake_case : str = hidden_size _snake_case : Optional[Any] = num_hidden_layers _snake_case : List[Any] = num_attention_heads _snake_case : Optional[int] = intermediate_size _snake_case : List[Any] = hidden_dropout_prob _snake_case : List[str] = attention_probs_dropout_prob _snake_case : Optional[int] = max_position_embeddings _snake_case : Any = eos_token_id _snake_case : List[Any] = pad_token_id _snake_case : Optional[int] = bos_token_id _snake_case : Any = attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window` and one before and one 
after _snake_case : Any = self.attention_window + 2 # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for # the `test_attention_outputs` and `test_hidden_states_output` tests _snake_case : Tuple = ( self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window ) def UpperCAmelCase_ ( self ) -> Optional[int]: """simple docstring""" _snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _snake_case : Tuple = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _snake_case : Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1 ) _snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _snake_case : List[Any] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , ) _snake_case : Dict = prepare_led_inputs_dict(lowercase__ , lowercase__ , lowercase__ ) _snake_case : Dict = tf.concat( [tf.zeros_like(lowercase__ )[:, :-1], tf.ones_like(lowercase__ )[:, -1:]] , axis=-1 , ) _snake_case : Dict = global_attention_mask return config, inputs_dict def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> int: """simple docstring""" _snake_case : int = TFLEDModel(config=lowercase__ ).get_decoder() _snake_case : Union[str, Any] = inputs_dict['''input_ids'''] _snake_case : List[str] 
= input_ids[:1, :] _snake_case : Tuple = inputs_dict['''attention_mask'''][:1, :] _snake_case : Dict = 1 # first forward pass _snake_case : Optional[int] = model(lowercase__ , attention_mask=lowercase__ , use_cache=lowercase__ ) _snake_case , _snake_case : Dict = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _snake_case : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size ) _snake_case : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _snake_case : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 ) _snake_case : List[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) _snake_case : List[Any] = model(lowercase__ , attention_mask=lowercase__ )[0] _snake_case : Tuple = model(lowercase__ , attention_mask=lowercase__ , past_key_values=lowercase__ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _snake_case : Tuple = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _snake_case : int = output_from_no_past[:, -3:, random_slice_idx] _snake_case : Optional[Any] = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowercase__ , lowercase__ , rtol=1E-3 ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , ): """simple docstring""" if attention_mask is None: _snake_case : Union[str, Any] = tf.cast(tf.math.not_equal(lowerCAmelCase_ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: _snake_case : str = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: _snake_case : int = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _snake_case : List[str] = 
tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf class lowerCamelCase (a__ , a__ , unittest.TestCase ): _lowercase : Optional[int] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else () _lowercase : int = (TFLEDForConditionalGeneration,) if is_tf_available() else () _lowercase : Dict = ( { """conversational""": TFLEDForConditionalGeneration, """feature-extraction""": TFLEDModel, """summarization""": TFLEDForConditionalGeneration, """text2text-generation""": TFLEDForConditionalGeneration, """translation""": TFLEDForConditionalGeneration, } if is_tf_available() else {} ) _lowercase : int = True _lowercase : List[Any] = False _lowercase : str = False _lowercase : Union[str, Any] = False def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" _snake_case : str = TFLEDModelTester(self ) _snake_case : Union[str, Any] = ConfigTester(self , config_class=lowercase__ ) def UpperCAmelCase_ ( self ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase_ ( self ) -> List[str]: """simple docstring""" _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowercase__ ) def UpperCAmelCase_ ( self ) -> int: """simple docstring""" _snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() _snake_case : Any = tf.zeros_like(inputs_dict['''attention_mask'''] ) _snake_case : Optional[Any] = 2 _snake_case : Any = tf.where( tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , ) _snake_case : Dict = True _snake_case : str = self.model_tester.seq_length _snake_case : Dict = 
self.model_tester.encoder_seq_length def check_decoder_attentions_output(lowercase__ ): _snake_case : Optional[int] = outputs.decoder_attentions self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) def check_encoder_attentions_output(lowercase__ ): _snake_case : int = [t.numpy() for t in outputs.encoder_attentions] _snake_case : Tuple = [t.numpy() for t in outputs.encoder_global_attentions] self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers ) self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) self.assertListEqual( list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , ) for model_class in self.all_model_classes: _snake_case : Union[str, Any] = True _snake_case : Dict = False _snake_case : Union[str, Any] = False _snake_case : List[Any] = model_class(lowercase__ ) _snake_case : Optional[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) ) _snake_case : List[Any] = len(lowercase__ ) self.assertEqual(config.output_hidden_states , lowercase__ ) check_encoder_attentions_output(lowercase__ ) if self.is_encoder_decoder: _snake_case : Union[str, Any] = model_class(lowercase__ ) _snake_case : List[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) ) self.assertEqual(config.output_hidden_states , lowercase__ ) check_decoder_attentions_output(lowercase__ ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] _snake_case : str = True _snake_case : Tuple = model_class(lowercase__ ) _snake_case : int = model(self._prepare_for_class(lowercase__ , lowercase__ ) ) self.assertEqual(config.output_hidden_states , 
lowercase__ ) check_encoder_attentions_output(lowercase__ ) # Check attention is always last and order is fine _snake_case : int = True _snake_case : List[str] = True _snake_case : Tuple = model_class(lowercase__ ) _snake_case : Optional[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase__ ) ) self.assertEqual(model.config.output_hidden_states , lowercase__ ) check_encoder_attentions_output(lowercase__ ) @unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' ) def UpperCAmelCase_ ( self ) -> int: """simple docstring""" pass def UpperCAmelCase_ ( self ) -> str: """simple docstring""" pass def _a ( lowerCAmelCase_ ): """simple docstring""" return tf.constant(lowerCAmelCase_ , dtype=tf.intaa ) UpperCAmelCase : Dict = 1E-4 @slow @require_tf class lowerCamelCase (unittest.TestCase ): def UpperCAmelCase_ ( self ) -> Dict: """simple docstring""" _snake_case : List[str] = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led # change to intended input here _snake_case : List[str] = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _snake_case : Tuple = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _snake_case : Tuple = prepare_led_inputs_dict(model.config , lowercase__ , lowercase__ ) _snake_case : int = model(**lowercase__ )[0] _snake_case : Dict = (1, 1_024, 768) self.assertEqual(output.shape , lowercase__ ) # change to expected output here _snake_case : List[Any] = tf.convert_to_tensor( [[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , ) tf.debugging.assert_near(output[:, :3, :3] , lowercase__ , atol=1E-3 ) def UpperCAmelCase_ ( self ) -> List[Any]: """simple docstring""" _snake_case : Any = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ) # change to intended input here _snake_case : Dict 
= _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _snake_case : Dict = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _snake_case : List[str] = prepare_led_inputs_dict(model.config , lowercase__ , lowercase__ ) _snake_case : Tuple = model(**lowercase__ )[0] _snake_case : Any = (1, 1_024, model.config.vocab_size) self.assertEqual(output.shape , lowercase__ ) # change to expected output here _snake_case : Dict = tf.convert_to_tensor( [[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , ) tf.debugging.assert_near(output[:, :3, :3] , lowercase__ , atol=1E-3 , rtol=1E-3 )
47
1
'''simple docstring'''

import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import LevitImageProcessor


# Fixture object holding the configuration used to build a LevitImageProcessor
# in the test class below.
class lowerCamelCase (unittest.TestCase ):
    def __init__( self , lowercase__ , lowercase__=7 , lowercase__=3 , lowercase__=18 , lowercase__=30 , lowercase__=400 , lowercase__=True , lowercase__=None , lowercase__=True , lowercase__=None , lowercase__=True , lowercase__=[0.5, 0.5, 0.5] , lowercase__=[0.5, 0.5, 0.5] , ) -> List[Any]:
        """Store the image-processor configuration on the tester instance.

        NOTE(review): the body reads names such as ``size``/``crop_size``/``parent``
        while every parameter is named ``lowercase__`` -- a mechanical rename has
        left the body referencing the original parameter names; verify against
        the upstream file before relying on this code.
        """
        _snake_case : int = size if size is not None else {'''shortest_edge''': 18}
        _snake_case : Tuple = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        _snake_case : Union[str, Any] = parent
        _snake_case : List[str] = batch_size
        _snake_case : Optional[Any] = num_channels
        _snake_case : Any = image_size
        _snake_case : Union[str, Any] = min_resolution
        _snake_case : Optional[Any] = max_resolution
        _snake_case : Any = do_resize
        _snake_case : Union[str, Any] = size
        _snake_case : Optional[int] = do_center_crop
        _snake_case : Tuple = crop_size
        _snake_case : List[Any] = do_normalize
        _snake_case : Dict = image_mean
        _snake_case : str = image_std

    def UpperCAmelCase_ ( self ) -> Dict:
        """Return the kwargs dict used to instantiate the image processor."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }


# Test class for LevitImageProcessor: checks configuration attributes,
# `from_dict` overrides, and output tensor shapes for PIL / numpy / torch inputs.
@require_torch
@require_vision
class lowerCamelCase (a__ , unittest.TestCase ):
    # Class under test; None when vision dependencies are unavailable.
    _lowercase : Tuple = LevitImageProcessor if is_vision_available() else None

    def UpperCAmelCase_ ( self ) -> List[str]:
        """Create the shared tester fixture."""
        _snake_case : int = LevitImageProcessingTester(self )

    @property
    def UpperCAmelCase_ ( self ) -> List[str]:
        """Convenience accessor for the tester's kwargs dict."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def UpperCAmelCase_ ( self ) -> Tuple:
        """The processor must expose every documented configuration attribute."""
        _snake_case : Tuple = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(lowercase__ , '''image_mean''' ) )
        self.assertTrue(hasattr(lowercase__ , '''image_std''' ) )
        self.assertTrue(hasattr(lowercase__ , '''do_normalize''' ) )
        self.assertTrue(hasattr(lowercase__ , '''do_resize''' ) )
        self.assertTrue(hasattr(lowercase__ , '''do_center_crop''' ) )
        self.assertTrue(hasattr(lowercase__ , '''size''' ) )

    def UpperCAmelCase_ ( self ) -> Optional[Any]:
        """`from_dict` must honour defaults and explicit size/crop_size overrides."""
        _snake_case : str = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
        self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
        _snake_case : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )

    def UpperCAmelCase_ ( self ) -> Tuple:
        """Intentionally empty placeholder (inherited behaviour not exercised here)."""
        pass

    def UpperCAmelCase_ ( self ) -> Optional[int]:
        """PIL input: single image and batch both produce NCHW crops of crop_size."""
        _snake_case : Dict = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        _snake_case : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase__ )
        for image in image_inputs:
            self.assertIsInstance(lowercase__ , Image.Image )
        # Test not batched input
        _snake_case : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        _snake_case : Any = image_processing(lowercase__ , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )

    def UpperCAmelCase_ ( self ) -> List[Any]:
        """numpy input: single array and batch both produce NCHW crops of crop_size."""
        _snake_case : List[str] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        _snake_case : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase__ , numpify=lowercase__ )
        for image in image_inputs:
            self.assertIsInstance(lowercase__ , np.ndarray )
        # Test not batched input
        _snake_case : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        _snake_case : List[str] = image_processing(lowercase__ , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )

    def UpperCAmelCase_ ( self ) -> Optional[Any]:
        """torch input: single tensor and batch both produce NCHW crops of crop_size."""
        _snake_case : Dict = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        _snake_case : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase__ , torchify=lowercase__ )
        for image in image_inputs:
            self.assertIsInstance(lowercase__ , torch.Tensor )
        # Test not batched input
        _snake_case : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        _snake_case : Dict = image_processing(lowercase__ , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
47
'''simple docstring'''

# Fast (Rust-backed) tokenizer for GPT-NeoX-20B, built on a pretrained
# `tokenizer.json` file rather than separate vocab/merges files.
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

UpperCAmelCase : Optional[int] = logging.get_logger(__name__)

# Canonical filenames for the serialized tokenizer assets.
UpperCAmelCase : List[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

# Download locations for the pretrained tokenizer file.
UpperCAmelCase : Any = {
    'tokenizer_file': {
        'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
    },
}

# Maximum model input lengths (in tokens) per checkpoint.
UpperCAmelCase : Optional[Any] = {
    'gpt-neox-20b': 2_0_4_8,
}


class lowerCamelCase (a__ ):
    # NOTE(review): these class attributes reference VOCAB_FILES_NAMES etc.,
    # but the module-level constants above were all renamed to `UpperCAmelCase`
    # -- name mismatch introduced by a mechanical rename; verify upstream.
    _lowercase : Optional[int] = VOCAB_FILES_NAMES
    _lowercase : str = PRETRAINED_VOCAB_FILES_MAP
    _lowercase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowercase : Optional[int] = ["""input_ids""", """attention_mask"""]

    def __init__( self , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__="<|endoftext|>" , lowercase__="<|endoftext|>" , lowercase__="<|endoftext|>" , lowercase__=False , **lowercase__ , ) -> List[Any]:
        """Initialise the fast tokenizer and force the backend pre-tokenizer's
        ``add_prefix_space`` flag to match the requested value.

        NOTE(review): the body reads ``pre_tok_state``/``add_prefix_space``/
        ``pre_tok_class`` while locals are assigned to ``_snake_case`` --
        obfuscation artefact; verify against the upstream file.
        """
        super().__init__(
            lowercase__ , lowercase__ , tokenizer_file=lowercase__ , unk_token=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , add_prefix_space=lowercase__ , **lowercase__ , )
        # Inspect the serialized state of the backend pre-tokenizer so we can
        # rebuild it if its add_prefix_space setting disagrees with ours.
        _snake_case : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , lowercase__ ) != add_prefix_space:
            _snake_case : int = getattr(lowercase__ , pre_tok_state.pop('''type''' ) )
            _snake_case : int = add_prefix_space
            # Re-instantiate the pre-tokenizer class with the corrected state.
            _snake_case : Optional[Any] = pre_tok_class(**lowercase__ )
        _snake_case : List[str] = add_prefix_space

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ = None ) -> Tuple[str]:
        """Serialise the tokenizer model files into the given directory and
        return the written file paths as a tuple."""
        _snake_case : Optional[int] = self._tokenizer.model.save(lowercase__ , name=lowercase__ )
        return tuple(lowercase__ )

    def UpperCAmelCase_ ( self , lowercase__ ) -> List[int]:
        """Flatten a Conversation into model input ids, appending EOS after each
        turn and truncating to the last ``model_max_length`` tokens."""
        _snake_case : List[str] = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(lowercase__ , add_special_tokens=lowercase__ ) + [self.eos_token_id] )
        # Keep only the most recent context if the conversation is too long.
        if len(lowercase__ ) > self.model_max_length:
            _snake_case : Dict = input_ids[-self.model_max_length :]
        return input_ids
47
1
'''simple docstring'''

# Lazy-loading package init for the ONNX export utilities: submodules are only
# imported when one of the listed names is first accessed.
from typing import TYPE_CHECKING

from ..utils import _LazyModule


# Maps submodule name -> public names it provides (consumed by _LazyModule).
UpperCAmelCase : Optional[Any] = {
    'config': [
        'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
        'OnnxConfig',
        'OnnxConfigWithPast',
        'OnnxSeq2SeqConfigWithPast',
        'PatchingSpec',
    ],
    'convert': ['export', 'validate_model_outputs'],
    'features': ['FeaturesManager'],
    'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}

if TYPE_CHECKING:
    # Real imports for static type checkers only; at runtime the lazy module
    # below resolves attribute access on demand.
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeqaSeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    # NOTE(review): `_import_structure` is referenced here, but the mapping
    # above was assigned to `UpperCAmelCase` -- name mismatch from a mechanical
    # rename; verify against the upstream file.
    UpperCAmelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
47
'''simple docstring'''

import math

from numpy import inf
from scipy.integrate import quad

# Restrict star-imports to the public entry point.
__all__ = ["_a"]


def _a(lowerCAmelCase_):
    """Return the gamma function Gamma(z) for ``lowerCAmelCase_`` = z > 0.

    Computed by numerically integrating Euler's integral
    Gamma(z) = integral_0^inf x**(z-1) * exp(-x) dx with ``scipy.integrate.quad``.

    :param lowerCAmelCase_: a positive real number z.
    :raises ValueError: if z <= 0, where the integral diverges.
    :return: float approximation of Gamma(z); e.g. Gamma(5) == 24.

    The original body referenced undefined names (``num``) and passed the
    number itself to ``quad`` as the integrand; both defects are fixed here.
    """
    if lowerCAmelCase_ <= 0:
        raise ValueError('''math domain error''')
    # quad returns (value, estimated-absolute-error); keep only the value.
    return quad(_integrand, 0, inf, args=(lowerCAmelCase_,))[0]


def _integrand(x, z):
    """Euler integrand x**(z-1) * exp(-x) for the gamma function."""
    return math.pow(x, z - 1) * math.exp(-x)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
47
1
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase : Dict = {'configuration_timm_backbone': ['TimmBackboneConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Union[str, Any] = ['TimmBackbone'] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
47
'''simple docstring'''

from __future__ import annotations

import unittest

from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow


if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPTaConfig,
        TaConfig,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeqaSeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPTaLMHeadModel,
        TFRobertaForMaskedLM,
        TFTaForConditionalGeneration,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST

if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeqaSeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPTaLMHeadModel,
        RobertaForMaskedLM,
        TaForConditionalGeneration,
    )


# PT<->TF cross-framework loading tests: each case loads a config, then a TF
# model from PyTorch weights (from_pt) and a PyTorch model from TF weights
# (from_tf), asserting both round trips succeed.
# NOTE(review): throughout the class, asserts and `from_pretrained` calls pass
# the placeholder `lowercase__` where the original referenced `model_name`,
# `config`, `model`, etc. -- a mechanical rename artefact; verify upstream.
@is_pt_tf_cross_test
class lowerCamelCase (unittest.TestCase ):
    @slow
    def UpperCAmelCase_ ( self ) -> List[Any]:
        """Round-trip base models between PT and TF."""
        for model_name in ["bert-base-uncased"]:
            _snake_case : Union[str, Any] = AutoConfig.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Any = TFAutoModel.from_pretrained(lowercase__ , from_pt=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : str = AutoModel.from_pretrained(lowercase__ , from_tf=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )

    @slow
    def UpperCAmelCase_ ( self ) -> List[str]:
        """Round-trip pre-training heads between PT and TF."""
        for model_name in ["bert-base-uncased"]:
            _snake_case : Optional[Any] = AutoConfig.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Dict = TFAutoModelForPreTraining.from_pretrained(lowercase__ , from_pt=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Tuple = AutoModelForPreTraining.from_pretrained(lowercase__ , from_tf=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )

    @slow
    def UpperCAmelCase_ ( self ) -> Union[str, Any]:
        """Round-trip causal LM heads, also exercising output_loading_info."""
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _snake_case : Optional[int] = AutoConfig.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : List[Any] = TFAutoModelForCausalLM.from_pretrained(lowercase__ , from_pt=lowercase__ )
            _snake_case , _snake_case : Tuple = TFAutoModelForCausalLM.from_pretrained(
                lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained(lowercase__ , from_tf=lowercase__ )
            _snake_case , _snake_case : Optional[Any] = AutoModelForCausalLM.from_pretrained(
                lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )

    @slow
    def UpperCAmelCase_ ( self ) -> Optional[Any]:
        """Round-trip LM-head models between PT and TF."""
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _snake_case : List[Any] = AutoConfig.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Tuple = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : int = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )

    @slow
    def UpperCAmelCase_ ( self ) -> Any:
        """Round-trip masked LM heads, also exercising output_loading_info."""
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _snake_case : List[str] = AutoConfig.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(lowercase__ , from_pt=lowercase__ )
            _snake_case , _snake_case : List[str] = TFAutoModelForMaskedLM.from_pretrained(
                lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : int = AutoModelForMaskedLM.from_pretrained(lowercase__ , from_tf=lowercase__ )
            _snake_case , _snake_case : Optional[int] = AutoModelForMaskedLM.from_pretrained(
                lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )

    @slow
    def UpperCAmelCase_ ( self ) -> List[str]:
        """Round-trip seq2seq LM heads, also exercising output_loading_info."""
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _snake_case : List[str] = AutoConfig.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase__ , from_pt=lowercase__ )
            _snake_case , _snake_case : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(
                lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : List[str] = AutoModelForSeqaSeqLM.from_pretrained(lowercase__ , from_tf=lowercase__ )
            _snake_case , _snake_case : Dict = AutoModelForSeqaSeqLM.from_pretrained(
                lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )

    @slow
    def UpperCAmelCase_ ( self ) -> Dict:
        """Round-trip sequence-classification heads between PT and TF."""
        for model_name in ["bert-base-uncased"]:
            _snake_case : Any = AutoConfig.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Any = TFAutoModelForSequenceClassification.from_pretrained(lowercase__ , from_pt=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Dict = AutoModelForSequenceClassification.from_pretrained(lowercase__ , from_tf=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )

    @slow
    def UpperCAmelCase_ ( self ) -> Optional[int]:
        """Round-trip question-answering heads between PT and TF."""
        for model_name in ["bert-base-uncased"]:
            _snake_case : str = AutoConfig.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : str = TFAutoModelForQuestionAnswering.from_pretrained(lowercase__ , from_pt=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Union[str, Any] = AutoModelForQuestionAnswering.from_pretrained(lowercase__ , from_tf=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )

    def UpperCAmelCase_ ( self ) -> str:
        """Cross-loading a small LM-head model preserves the parameter count."""
        _snake_case : Union[str, Any] = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ )
        self.assertIsInstance(lowercase__ , lowercase__ )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
        _snake_case : Tuple = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ )
        self.assertIsInstance(lowercase__ , lowercase__ )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )

    def UpperCAmelCase_ ( self ) -> str:
        """Same parameter-count check for the second small identifier."""
        _snake_case : List[str] = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ )
        self.assertIsInstance(lowercase__ , lowercase__ )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
        _snake_case : int = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ )
        self.assertIsInstance(lowercase__ , lowercase__ )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
47
1
'''simple docstring'''

# Lazy-loading package init for Conditional DETR: vision- and torch-dependent
# submodules are registered only when their backend is installed, and imported
# only when first accessed.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Submodule -> exported names mapping consumed by _LazyModule below.
UpperCAmelCase : int = {
    'configuration_conditional_detr': [
        'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'ConditionalDetrConfig',
        'ConditionalDetrOnnxConfig',
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # vision deps missing: the processor entries are simply not registered.
    pass
else:
    UpperCAmelCase : Optional[Any] = ['ConditionalDetrFeatureExtractor']
    UpperCAmelCase : Dict = ['ConditionalDetrImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase : Tuple = [
        'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ConditionalDetrForObjectDetection',
        'ConditionalDetrForSegmentation',
        'ConditionalDetrModel',
        'ConditionalDetrPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_conditional_detr import (
        CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConditionalDetrConfig,
        ConditionalDetrOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
        from .image_processing_conditional_detr import ConditionalDetrImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_conditional_detr import (
            CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConditionalDetrForObjectDetection,
            ConditionalDetrForSegmentation,
            ConditionalDetrModel,
            ConditionalDetrPreTrainedModel,
        )
else:
    import sys

    # NOTE(review): `_import_structure` is referenced but the mapping above was
    # assigned to `UpperCAmelCase` -- name mismatch from a mechanical rename; verify.
    UpperCAmelCase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
47
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase : Dict = {'configuration_timm_backbone': ['TimmBackboneConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Union[str, Any] = ['TimmBackbone'] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
47
1
'''simple docstring'''

# Restrict star-imports to the two public entry points.
__all__ = ["gcd", "_a"]


def gcd(a, b):
    """Return the greatest common divisor of ``a`` and ``b`` via the
    iterative Euclidean algorithm.

    Named ``gcd`` (not ``_a``) because the mod-inverse function below calls it
    by that name; the original file defined it as a second ``_a``, shadowing it.
    """
    while a != 0:
        a, b = b % a, a
    return b


def _a(a, m):
    """Return the modular multiplicative inverse of ``a`` modulo ``m``.

    Uses the extended Euclidean algorithm, tracking the Bezout coefficient
    triples (u1, u2, u3) and (v1, v2, v3) with the invariant
    u1*a + u2*m == u3.

    :param a: the value to invert.
    :param m: the modulus.
    :raises ValueError: when gcd(a, m) != 1, i.e. no inverse exists.
    :return: x in [0, m) with (a * x) % m == 1.

    The original body had duplicate parameter names (a SyntaxError) and
    referenced undefined locals; this is the repaired implementation.
    """
    if gcd(a, m) != 1:
        _snake_case = f'''mod inverse of {a!r} and {m!r} does not exist'''
        raise ValueError(_snake_case)
    # Extended Euclid: (ua, ub, uc) and (va, vb, vc) hold Bezout triples.
    (ua, ub, uc) = (1, 0, a)
    (va, vb, vc) = (0, 1, m)
    while vc != 0:
        q = uc // vc
        # Shift: new v-triple is u - q*v, new u-triple is the old v-triple.
        va, vb, vc, ua, ub, uc = (ua - q * va), (ub - q * vb), (uc - q * vc), va, vb, vc
    # ua is the Bezout coefficient of a; reduce into [0, m).
    return ua % m
47
'''simple docstring''' import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version UpperCAmelCase : Tuple = logging.getLogger(__name__) require_version('pytorch_lightning>=1.0.4') UpperCAmelCase : str = { 'base': AutoModel, 'sequence-classification': AutoModelForSequenceClassification, 'question-answering': AutoModelForQuestionAnswering, 'pretraining': AutoModelForPreTraining, 'token-classification': AutoModelForTokenClassification, 'language-modeling': AutoModelWithLMHead, 'summarization': AutoModelForSeqaSeqLM, 'translation': AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization UpperCAmelCase : Optional[Any] = { 'linear': get_linear_schedule_with_warmup, 'cosine': get_cosine_schedule_with_warmup, 'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup, 'polynomial': get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } UpperCAmelCase : Tuple = sorted(arg_to_scheduler.keys()) UpperCAmelCase : Optional[Any] = '{' + ', '.join(arg_to_scheduler_choices) + '}' class lowerCamelCase (pl.LightningModule ): def __init__( self , lowercase__ , lowercase__=None , lowercase__="base" , lowercase__=None , lowercase__=None , 
lowercase__=None , **lowercase__ , ) -> Optional[int]: """simple docstring""" super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(lowercase__ ) _snake_case : Union[str, Any] = 0 _snake_case : int = Path(self.hparams.output_dir ) _snake_case : int = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: _snake_case : Tuple = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'''num_labels''': num_labels} if num_labels is not None else {}) , cache_dir=lowercase__ , **lowercase__ , ) else: _snake_case : PretrainedConfig = config _snake_case : Optional[Any] = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''') for p in extra_model_params: if getattr(self.hparams , lowercase__ , lowercase__ ): assert hasattr(self.config , lowercase__ ), F'''model config doesn\'t have a `{p}` attribute''' setattr(self.config , lowercase__ , getattr(self.hparams , lowercase__ ) ) if tokenizer is None: _snake_case : Optional[int] = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=lowercase__ , ) else: _snake_case : PreTrainedTokenizer = tokenizer _snake_case : Any = MODEL_MODES[mode] if model is None: _snake_case : List[Any] = self.model_type.from_pretrained( self.hparams.model_name_or_path , from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=lowercase__ , ) else: _snake_case : Optional[Any] = model def UpperCAmelCase_ ( self , *lowercase__ , **lowercase__ ) -> List[str]: """simple docstring""" _snake_case : Dict = self.model_type.from_pretrained(*lowercase__ , **lowercase__ ) def UpperCAmelCase_ ( self ) -> List[Any]: """simple docstring""" _snake_case : Optional[int] = 
arg_to_scheduler[self.hparams.lr_scheduler] _snake_case : Optional[int] = get_schedule_func( self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() ) _snake_case : str = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1} return scheduler def UpperCAmelCase_ ( self ) -> int: """simple docstring""" _snake_case : Any = self.model _snake_case : List[Any] = ['''bias''', '''LayerNorm.weight'''] _snake_case : List[str] = [ { '''params''': [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check this named paramters '''weight_decay''': self.hparams.weight_decay, }, { '''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0, }, ] if self.hparams.adafactor: _snake_case : Any = Adafactor( lowercase__ , lr=self.hparams.learning_rate , scale_parameter=lowercase__ , relative_step=lowercase__ ) else: _snake_case : List[str] = AdamW( lowercase__ , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon ) _snake_case : List[str] = optimizer _snake_case : Any = self.get_lr_scheduler() return [optimizer], [scheduler] def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Any: """simple docstring""" return self.validation_step(lowercase__ , lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ ) -> Tuple: """simple docstring""" return self.validation_end(lowercase__ ) def UpperCAmelCase_ ( self ) -> int: """simple docstring""" _snake_case : Any = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores _snake_case : Optional[int] = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def UpperCAmelCase_ ( self , lowercase__ ) -> Any: """simple docstring""" if stage == "test": _snake_case : Any = len(self.test_dataloader().dataset ) else: _snake_case : Dict = self.get_dataloader('''train''' , 
self.hparams.train_batch_size , shuffle=lowercase__ ) _snake_case : Optional[int] = len(self.train_dataloader().dataset ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ = False ) -> str: """simple docstring""" raise NotImplementedError('''You must implement this for your task''' ) def UpperCAmelCase_ ( self ) -> Optional[int]: """simple docstring""" return self.train_loader def UpperCAmelCase_ ( self ) -> Dict: """simple docstring""" return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=lowercase__ ) def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ ) -> Optional[int]: """simple docstring""" return os.path.join( self.hparams.data_dir , '''cached_{}_{}_{}'''.format( lowercase__ , list(filter(lowercase__ , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , ) @pl.utilities.rank_zero_only def UpperCAmelCase_ ( self , lowercase__ ) -> None: """simple docstring""" _snake_case : Dict = self.output_dir.joinpath('''best_tfmr''' ) _snake_case : Tuple = self.step_count self.model.save_pretrained(lowercase__ ) self.tokenizer.save_pretrained(lowercase__ ) @staticmethod def UpperCAmelCase_ ( lowercase__ , lowercase__ ) -> Tuple: """simple docstring""" parser.add_argument( '''--model_name_or_path''' , default=lowercase__ , type=lowercase__ , required=lowercase__ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--config_name''' , default='''''' , type=lowercase__ , help='''Pretrained config name or path if not the same as model_name''' ) parser.add_argument( '''--tokenizer_name''' , default=lowercase__ , type=lowercase__ , help='''Pretrained tokenizer name or path if not the same as model_name''' , ) parser.add_argument( '''--cache_dir''' , default=str(Path(lowercase__ 
).parent / '''test_run''' / '''cache''' ) , type=lowercase__ , help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , ) parser.add_argument( '''--encoder_layerdrop''' , type=lowercase__ , help='''Encoder layer dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--decoder_layerdrop''' , type=lowercase__ , help='''Decoder layer dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--dropout''' , type=lowercase__ , help='''Dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--attention_dropout''' , type=lowercase__ , help='''Attention dropout probability (Optional). Goes into model.config''' , ) parser.add_argument('''--learning_rate''' , default=5E-5 , type=lowercase__ , help='''The initial learning rate for Adam.''' ) parser.add_argument( '''--lr_scheduler''' , default='''linear''' , choices=lowercase__ , metavar=lowercase__ , type=lowercase__ , help='''Learning rate scheduler''' , ) parser.add_argument('''--weight_decay''' , default=0.0 , type=lowercase__ , help='''Weight decay if we apply some.''' ) parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=lowercase__ , help='''Epsilon for Adam optimizer.''' ) parser.add_argument('''--warmup_steps''' , default=0 , type=lowercase__ , help='''Linear warmup over warmup_steps.''' ) parser.add_argument('''--num_workers''' , default=4 , type=lowercase__ , help='''kwarg passed to DataLoader''' ) parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=lowercase__ ) parser.add_argument('''--train_batch_size''' , default=32 , type=lowercase__ ) parser.add_argument('''--eval_batch_size''' , default=32 , type=lowercase__ ) parser.add_argument('''--adafactor''' , action='''store_true''' ) class lowerCamelCase (pl.Callback ): def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> str: """simple docstring""" if ( trainer.is_global_zero and 
trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. class lowerCamelCase (pl.Callback ): def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> List[str]: """simple docstring""" for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(lowercase__ ) class lowerCamelCase (pl.Callback ): def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Any: """simple docstring""" _snake_case : Any = trainer.lr_schedulers[0]['''scheduler'''] _snake_case : Optional[int] = {F'''lr_group_{i}''': lr for i, lr in enumerate(lr_scheduler.get_lr() )} pl_module.logger.log_metrics(lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> List[str]: """simple docstring""" rank_zero_info('''***** Validation results *****''' ) _snake_case : Dict = trainer.callback_metrics # Log results for key in sorted(lowercase__ ): if key not in ["log", "progress_bar"]: rank_zero_info('''{} = {}\n'''.format(lowercase__ , str(metrics[key] ) ) ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Dict: """simple docstring""" rank_zero_info('''***** Test results *****''' ) _snake_case : Dict = trainer.callback_metrics # Log and save results to file _snake_case : str = os.path.join(pl_module.hparams.output_dir , '''test_results.txt''' ) with open(lowercase__ , '''w''' ) as writer: for key in sorted(lowercase__ ): if key not in ["log", "progress_bar"]: rank_zero_info('''{} = {}\n'''.format(lowercase__ , str(metrics[key] ) ) ) writer.write('''{} = {}\n'''.format(lowercase__ , str(metrics[key] ) ) ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" parser.add_argument( '''--output_dir''' , default=str(Path(lowerCAmelCase_ ).parent / '''test_run''' / '''model_checkpoints''' ) , type=lowerCAmelCase_ , help='''The output directory where the model predictions and 
checkpoints will be written.''' , ) parser.add_argument( '''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , ) parser.add_argument( '''--fp16_opt_level''' , type=lowerCAmelCase_ , default='''O2''' , help=( '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].''' '''See details at https://nvidia.github.io/apex/amp.html''' ) , ) parser.add_argument('''--n_tpu_cores''' , dest='''tpu_cores''' , type=lowerCAmelCase_ ) parser.add_argument('''--max_grad_norm''' , dest='''gradient_clip_val''' , default=1.0 , type=lowerCAmelCase_ , help='''Max gradient norm''' ) parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' ) parser.add_argument('''--do_predict''' , action='''store_true''' , help='''Whether to run predictions on the test set.''' ) parser.add_argument( '''--gradient_accumulation_steps''' , dest='''accumulate_grad_batches''' , type=lowerCAmelCase_ , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , ) parser.add_argument('''--seed''' , type=lowerCAmelCase_ , default=42 , help='''random seed for initialization''' ) parser.add_argument( '''--data_dir''' , default=str(Path(lowerCAmelCase_ ).parent / '''test_run''' / '''dummy-train-data''' ) , type=lowerCAmelCase_ , help='''The input data dir. 
Should contain the training files for the CoNLL-2003 NER task.''' , ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=True , lowerCAmelCase_=[] , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ , ): """simple docstring""" pl.seed_everything(args.seed ) # init model _snake_case : Union[str, Any] = Path(model.hparams.output_dir ) odir.mkdir(exist_ok=lowerCAmelCase_ ) # add custom checkpoints if checkpoint_callback is None: _snake_case : Any = pl.callbacks.ModelCheckpoint( filepath=args.output_dir , prefix='''checkpoint''' , monitor='''val_loss''' , mode='''min''' , save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(lowerCAmelCase_ ) if logging_callback is None: _snake_case : str = LoggingCallback() _snake_case : Tuple = {} if args.fpaa: _snake_case : Union[str, Any] = 16 if args.gpus > 1: _snake_case : Optional[Any] = '''auto''' _snake_case : Tuple = '''ddp''' _snake_case : Optional[Any] = args.accumulate_grad_batches _snake_case : Tuple = None _snake_case : str = '''auto''' _snake_case : int = pl.Trainer.from_argparse_args( lowerCAmelCase_ , weights_summary=lowerCAmelCase_ , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=lowerCAmelCase_ , val_check_interval=1 , num_sanity_val_steps=2 , **lowerCAmelCase_ , ) if args.do_train: trainer.fit(lowerCAmelCase_ ) else: print('''RAG modeling tests with new set functions successfuly executed!''' ) return trainer
47
1
'''simple docstring''' import unittest import numpy as np from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class lowerCamelCase (a__ , unittest.TestCase ): # FIXME: add fast tests pass @nightly @require_onnxruntime @require_torch_gpu class lowerCamelCase (unittest.TestCase ): @property def UpperCAmelCase_ ( self ) -> str: """simple docstring""" return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def UpperCAmelCase_ ( self ) -> int: """simple docstring""" _snake_case : int = ort.SessionOptions() _snake_case : str = False return options def UpperCAmelCase_ ( self ) -> Any: """simple docstring""" _snake_case : Any = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) _snake_case : Optional[int] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) _snake_case : List[str] = OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=lowercase__ , feature_extractor=lowercase__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=lowercase__ ) _snake_case : Dict = '''A red cat sitting on a park bench''' _snake_case : Tuple = np.random.RandomState(0 ) _snake_case : Dict = pipe( prompt=lowercase__ , image=lowercase__ , mask_image=lowercase__ , guidance_scale=7.5 , num_inference_steps=10 , generator=lowercase__ , output_type='''np''' , ) _snake_case : Union[str, Any] = output.images _snake_case : 
List[str] = images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) _snake_case : List[Any] = np.array([0.2_514, 0.3_007, 0.3_517, 0.1_790, 0.2_382, 0.3_167, 0.1_944, 0.2_273, 0.2_464] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def UpperCAmelCase_ ( self ) -> List[str]: """simple docstring""" _snake_case : int = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) _snake_case : List[str] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) _snake_case : str = LMSDiscreteScheduler.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' ) _snake_case : Union[str, Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=lowercase__ , safety_checker=lowercase__ , feature_extractor=lowercase__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=lowercase__ ) _snake_case : Dict = '''A red cat sitting on a park bench''' _snake_case : int = np.random.RandomState(0 ) _snake_case : int = pipe( prompt=lowercase__ , image=lowercase__ , mask_image=lowercase__ , guidance_scale=7.5 , num_inference_steps=20 , generator=lowercase__ , output_type='''np''' , ) _snake_case : List[Any] = output.images _snake_case : Dict = images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) _snake_case : Optional[int] = np.array([0.0_086, 0.0_077, 0.0_083, 0.0_093, 0.0_107, 0.0_139, 0.0_094, 0.0_097, 0.0_125] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
47
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase : List[str] = logging.get_logger(__name__) UpperCAmelCase : Dict = { 'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json', # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class lowerCamelCase (a__ ): _lowercase : List[str] = """sew-d""" def __init__( self , lowercase__=32 , lowercase__=768 , lowercase__=12 , lowercase__=12 , lowercase__=3_072 , lowercase__=2 , lowercase__=512 , lowercase__=256 , lowercase__=True , lowercase__=True , lowercase__=("p2c", "c2p") , lowercase__="layer_norm" , lowercase__="gelu_python" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.0 , lowercase__=0.1 , lowercase__=0.02 , lowercase__=1E-7 , lowercase__=1E-5 , lowercase__="group" , lowercase__="gelu" , lowercase__=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowercase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowercase__=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowercase__=False , lowercase__=128 , lowercase__=16 , lowercase__=True , lowercase__=0.05 , lowercase__=10 , lowercase__=2 , lowercase__=0.0 , lowercase__=10 , lowercase__=0 , lowercase__="mean" , lowercase__=False , lowercase__=False , lowercase__=256 , lowercase__=0 , lowercase__=1 , lowercase__=2 , **lowercase__ , ) -> Dict: """simple docstring""" super().__init__(**lowercase__ , pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ ) _snake_case : List[str] = hidden_size _snake_case : Optional[Any] = feat_extract_norm _snake_case : Tuple = feat_extract_activation _snake_case : Tuple = list(lowercase__ ) _snake_case : Any = list(lowercase__ ) _snake_case : Any = list(lowercase__ ) _snake_case : Any = conv_bias _snake_case : List[Any] = num_conv_pos_embeddings _snake_case : Any = num_conv_pos_embedding_groups _snake_case : Union[str, Any] = 
len(self.conv_dim ) _snake_case : Optional[Any] = num_hidden_layers _snake_case : Optional[int] = intermediate_size _snake_case : Any = squeeze_factor _snake_case : Optional[Any] = max_position_embeddings _snake_case : Tuple = position_buckets _snake_case : Tuple = share_att_key _snake_case : Any = relative_attention _snake_case : Optional[int] = norm_rel_ebd _snake_case : Optional[Any] = list(lowercase__ ) _snake_case : List[Any] = hidden_act _snake_case : List[Any] = num_attention_heads _snake_case : Dict = hidden_dropout _snake_case : Tuple = attention_dropout _snake_case : Union[str, Any] = activation_dropout _snake_case : List[Any] = feat_proj_dropout _snake_case : Optional[int] = final_dropout _snake_case : Optional[Any] = layer_norm_eps _snake_case : Dict = feature_layer_norm_eps _snake_case : List[Any] = initializer_range _snake_case : Dict = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect.''' '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,''' F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)''' F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _snake_case : Union[str, Any] = apply_spec_augment _snake_case : Any = mask_time_prob _snake_case : List[str] = mask_time_length _snake_case : Dict = mask_time_min_masks _snake_case : Union[str, Any] = mask_feature_prob _snake_case : Tuple = mask_feature_length _snake_case : Union[str, Any] = mask_feature_min_masks # ctc loss _snake_case : Optional[Any] = ctc_loss_reduction _snake_case : Optional[Any] = ctc_zero_infinity # sequence classification _snake_case : List[Any] = 
use_weighted_layer_sum _snake_case : Any = classifier_proj_size @property def UpperCAmelCase_ ( self ) -> Any: """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
47
1
"""Utilities for converting a PyTorch state dict into Flax parameters."""
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


logger = logging.get_logger(__name__)


def rename_key(key):
    """Rewrite ``layer.0``-style segments of ``key`` as ``layer_0``.

    PyTorch names submodules inside a ``ModuleList`` with ``name.<index>``;
    the corresponding Flax parameter-tree key uses ``name_<index>``.
    """
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename one PyTorch weight key (as a tuple) to its Flax equivalent.

    Also transposes/reshapes ``pt_tensor`` when the two layouts differ
    (conv kernels, linear weights). Returns ``(new_key, tensor)``.
    """
    # Layer-norm weight is stored under "scale" in Flax.
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: a 4-D "weight" is transposed from PyTorch OIHW to Flax HWIO
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: Flax stores the transposed weight matrix as "kernel"
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def _a(pt_state_dict, flax_model, init_key=42):
    """Convert ``pt_state_dict`` into an (unflattened) Flax param dict.

    ``init_key`` seeds the PRNG used to initialize reference Flax weights,
    whose shapes the converted tensors are validated against.

    Raises:
        ValueError: if a converted tensor's shape does not match the
            randomly initialized Flax parameter of the same key.
    """
    # Step 1: convert PyTorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
47
'''simple docstring''' from random import randint from tempfile import TemporaryFile import numpy as np def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : List[Any] = 0 if start < end: _snake_case : List[Any] = randint(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Any = a[end] _snake_case : List[str] = a[pivot] _snake_case : Optional[int] = temp _snake_case , _snake_case : List[Any] = _in_place_partition(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) count += _in_place_quick_sort(lowerCAmelCase_ , lowerCAmelCase_ , p - 1 ) count += _in_place_quick_sort(lowerCAmelCase_ , p + 1 , lowerCAmelCase_ ) return count def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[Any] = 0 _snake_case : Optional[int] = randint(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Tuple = a[end] _snake_case : Optional[Any] = a[pivot] _snake_case : Union[str, Any] = temp _snake_case : Union[str, Any] = start - 1 for index in range(lowerCAmelCase_ , lowerCAmelCase_ ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value _snake_case : Optional[int] = new_pivot_index + 1 _snake_case : Optional[Any] = a[new_pivot_index] _snake_case : Tuple = a[index] _snake_case : str = temp _snake_case : Any = a[new_pivot_index + 1] _snake_case : str = a[end] _snake_case : Optional[int] = temp return new_pivot_index + 1, count UpperCAmelCase : Dict = TemporaryFile() UpperCAmelCase : Dict = 1_0_0 # 1000 elements are to be sorted UpperCAmelCase, UpperCAmelCase : str = 0, 1 # mean and standard deviation UpperCAmelCase : Optional[Any] = np.random.normal(mu, sigma, p) np.save(outfile, X) print('The array is') print(X) outfile.seek(0) # using the same array UpperCAmelCase : int = np.load(outfile) UpperCAmelCase : Optional[int] = len(M) - 1 UpperCAmelCase : str = _in_place_quick_sort(M, 0, r) print( 'No of Comparisons for 100 elements selected from a standard normal 
distribution' 'is :' ) print(z)
47
1
"""Lazy import structure for the MVP model (standard HF ``__init__`` pattern)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Maps submodule name -> public names it exports; extended below when the
# optional dependencies (tokenizers, torch) are installed.
_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for type checkers only; at runtime the module is lazy.
    from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
    from .tokenization_mvp import MvpTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mvp_fast import MvpTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mvp import (
            MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
            MvpForCausalLM,
            MvpForConditionalGeneration,
            MvpForQuestionAnswering,
            MvpForSequenceClassification,
            MvpModel,
            MvpPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
47
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available


# The VQ-Diffusion pipeline needs both PyTorch and `transformers`; only
# expose it when both optional dependencies are installed.
if is_transformers_available() and is_torch_available():
    from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
47
1
"""Stable Diffusion inference on CPU, optimized with Intel Extension for
PyTorch (IPEX): channels-last memory format + bfloat16 autocast."""
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline


parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
# Dummy UNet inputs (latents, timestep, text embeddings) so ipex can trace
# the model; shapes follow SD v1 defaults (64x64 latents, 77-token prompt).
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    # Older ipex versions do not accept sample_input; fall back without it.
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}

if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
47
"""Command-line entry point for the ``transformers-cli`` tool."""
from argparse import ArgumentParser

from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands


def main():
    """Parse command-line arguments and dispatch to the chosen subcommand."""
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        # No subcommand given: show usage and exit with an error status.
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
47
1
'''simple docstring''' # Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version UpperCAmelCase : Union[str, Any] = get_logger(__name__) class lowerCamelCase : _lowercase : List[Any] = """dummy_data""" _lowercase : str = """datasets""" _lowercase : Tuple = False def __init__( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = False , lowercase__ = True , lowercase__ = None , ) -> Dict: """simple docstring""" _snake_case : List[Any] = 0 _snake_case : Optional[int] = dataset_name _snake_case : str = cache_dir _snake_case : Optional[int] = use_local_dummy_data _snake_case : Dict = config # download_callbacks take a single url as input _snake_case : List[Callable] = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root _snake_case : List[Any] = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general _snake_case : List[str] = str(lowercase__ ) # to be downloaded _snake_case : int = None _snake_case : Tuple = None @property def UpperCAmelCase_ ( self ) -> Dict: """simple docstring""" if self._dummy_file is None: _snake_case : List[Any] = self.download_dummy_data() return self._dummy_file @property def UpperCAmelCase_ ( self ) -> str: """simple docstring""" if self.config is not None: # structure is dummy / config_name / version_name return os.path.join('''dummy''' , self.config.name , self.version_name ) # structure is dummy / version_name return os.path.join('''dummy''' , self.version_name ) @property def UpperCAmelCase_ ( self ) -> List[str]: """simple docstring""" return os.path.join(self.dummy_data_folder , '''dummy_data.zip''' ) def UpperCAmelCase_ ( self ) -> Dict: """simple 
docstring""" _snake_case : Optional[int] = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) _snake_case : int = cached_path( lowercase__ , cache_dir=self.cache_dir , extract_compressed_file=lowercase__ , force_extract=lowercase__ ) return os.path.join(lowercase__ , self.dummy_file_name ) @property def UpperCAmelCase_ ( self ) -> List[Any]: """simple docstring""" return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file ) @property def UpperCAmelCase_ ( self ) -> Union[str, Any]: """simple docstring""" if self._bucket_url is None: _snake_case : Any = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '''/''' ) ) return self._bucket_url @property def UpperCAmelCase_ ( self ) -> str: """simple docstring""" if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. return "/".join(self.dummy_file.replace(os.sep , '''/''' ).split('''/''' )[:-1] ) def UpperCAmelCase_ ( self , lowercase__ , *lowercase__ ) -> Union[str, Any]: """simple docstring""" if self.load_existing_dummy_data: # dummy data is downloaded and tested _snake_case : List[str] = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned _snake_case : Optional[Any] = self.dummy_file_name # special case when data_url is a dict if isinstance(lowercase__ , lowercase__ ): return self.create_dummy_data_dict(lowercase__ , lowercase__ ) elif isinstance(lowercase__ , (list, tuple) ): return self.create_dummy_data_list(lowercase__ , lowercase__ ) else: return self.create_dummy_data_single(lowercase__ , lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ , *lowercase__ ) -> Dict: """simple docstring""" return self.download_and_extract(lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Tuple: """simple docstring""" return self.download_and_extract(lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ 
, *lowercase__ , **lowercase__ ) -> List[str]: """simple docstring""" return path def UpperCAmelCase_ ( self ) -> Union[str, Any]: """simple docstring""" return {} def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Union[str, Any]: """simple docstring""" _snake_case : Union[str, Any] = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(lowercase__ , lowercase__ ): for single_url in single_urls: download_callback(lowercase__ ) else: _snake_case : List[Any] = single_urls download_callback(lowercase__ ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(lowercase__ , lowercase__ ): _snake_case : int = [os.path.join(lowercase__ , urllib.parse.quote_plus(Path(lowercase__ ).name ) ) for x in single_urls] else: _snake_case : Dict = single_urls _snake_case : Optional[int] = os.path.join(lowercase__ , urllib.parse.quote_plus(Path(lowercase__ ).name ) ) _snake_case : List[Any] = value # make sure that values are unique if all(isinstance(lowercase__ , lowercase__ ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique _snake_case : str = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> int: """simple docstring""" _snake_case : Union[str, Any] = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one _snake_case : List[Any] = all(bool(re.findall('''[0-9]{3,}-of-[0-9]{3,}''' , lowercase__ ) ) for url in data_url ) _snake_case : Optional[int] = all( url.startswith('''https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed''' ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): _snake_case : Tuple = [data_url[0]] * len(lowercase__ ) 
for single_url in data_url: for download_callback in self.download_callbacks: download_callback(lowercase__ ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _snake_case : Optional[Any] = os.path.join(lowercase__ , urllib.parse.quote_plus(single_url.split('''/''' )[-1] ) ) dummy_data_list.append(lowercase__ ) return dummy_data_list def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Union[str, Any]: """simple docstring""" for download_callback in self.download_callbacks: download_callback(lowercase__ ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _snake_case : str = os.path.join(lowercase__ , urllib.parse.quote_plus(data_url.split('''/''' )[-1] ) ) if os.path.exists(lowercase__ ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. 
return path_to_dummy_data def UpperCAmelCase_ ( self ) -> List[str]: """simple docstring""" pass def UpperCAmelCase_ ( self ) -> str: """simple docstring""" pass def UpperCAmelCase_ ( self , lowercase__ ) -> str: """simple docstring""" def _iter_archive_members(lowercase__ ): # this preserves the order of the members inside the ZIP archive _snake_case : Optional[Any] = Path(self.dummy_file ).parent _snake_case : Any = path.relative_to(lowercase__ ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: _snake_case : List[Any] = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(lowercase__ ) _snake_case : Optional[Any] = Path(lowercase__ ) _snake_case : Dict = _iter_archive_members(lowercase__ ) if self.use_local_dummy_data else path.rglob('''*''' ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith(('''.''', '''__''') ): yield file_path.relative_to(lowercase__ ).as_posix(), file_path.open('''rb''' ) def UpperCAmelCase_ ( self , lowercase__ ) -> Dict: """simple docstring""" if not isinstance(lowercase__ , lowercase__ ): _snake_case : Tuple = [paths] for path in paths: if os.path.isfile(lowercase__ ): if os.path.basename(lowercase__ ).startswith(('''.''', '''__''') ): return yield path else: for dirpath, dirnames, filenames in os.walk(lowercase__ ): if os.path.basename(lowercase__ ).startswith(('''.''', '''__''') ): continue dirnames.sort() for filename in sorted(lowercase__ ): if filename.startswith(('''.''', '''__''') ): continue yield os.path.join(lowercase__ , lowercase__ )
47
'''simple docstring''' from collections.abc import Generator def _a ( ): """simple docstring""" _snake_case , _snake_case : Union[str, Any] = 0, 1 while True: _snake_case , _snake_case : List[str] = b, a + b yield b def _a ( lowerCAmelCase_ = 1_000 ): """simple docstring""" _snake_case : List[str] = 1 _snake_case : Dict = fibonacci_generator() while len(str(next(lowerCAmelCase_ ) ) ) < n: answer += 1 return answer + 1 if __name__ == "__main__": print(solution(int(str(input()).strip())))
47
1
'''simple docstring''' import argparse import requests import torch from PIL import Image from torchvision.transforms import Compose, Normalize, Resize, ToTensor from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor def _a ( lowerCAmelCase_ ): """simple docstring""" _snake_case : str = SwinaSRConfig() if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: _snake_case : Optional[Any] = 4 elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: _snake_case : Optional[Any] = 4 _snake_case : Tuple = 48 _snake_case : Optional[Any] = '''pixelshuffle_aux''' elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: _snake_case : Any = [6, 6, 6, 6] _snake_case : Optional[Any] = 60 _snake_case : Optional[Any] = [6, 6, 6, 6] _snake_case : str = '''pixelshuffledirect''' elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: _snake_case : Optional[int] = 4 _snake_case : Any = '''nearest+conv''' elif "Swin2SR_Jpeg_dynamic" in checkpoint_url: _snake_case : str = 1 _snake_case : List[Any] = 1 _snake_case : Optional[int] = 126 _snake_case : List[str] = 7 _snake_case : List[str] = 255.0 _snake_case : Optional[int] = '''''' return config def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" if "patch_embed.proj" in name and "layers" not in name: _snake_case : List[str] = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: _snake_case : Optional[Any] = name.replace('''patch_embed.norm''' , '''embeddings.patch_embeddings.layernorm''' ) if "layers" in name: _snake_case : List[str] = name.replace('''layers''' , '''encoder.stages''' ) if "residual_group.blocks" in name: _snake_case : Union[str, Any] = name.replace('''residual_group.blocks''' , '''layers''' ) if "attn.proj" in name: _snake_case : Optional[Any] = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: _snake_case : int = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: 
_snake_case : Union[str, Any] = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: _snake_case : List[str] = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: _snake_case : Dict = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: _snake_case : List[str] = name.replace('''mlp.fc2''' , '''output.dense''' ) if "q_bias" in name: _snake_case : Optional[Any] = name.replace('''q_bias''' , '''query.bias''' ) if "k_bias" in name: _snake_case : Any = name.replace('''k_bias''' , '''key.bias''' ) if "v_bias" in name: _snake_case : Tuple = name.replace('''v_bias''' , '''value.bias''' ) if "cpb_mlp" in name: _snake_case : Optional[Any] = name.replace('''cpb_mlp''' , '''continuous_position_bias_mlp''' ) if "patch_embed.proj" in name: _snake_case : Dict = name.replace('''patch_embed.proj''' , '''patch_embed.projection''' ) if name == "norm.weight": _snake_case : List[Any] = '''layernorm.weight''' if name == "norm.bias": _snake_case : Optional[Any] = '''layernorm.bias''' if "conv_first" in name: _snake_case : Optional[int] = name.replace('''conv_first''' , '''first_convolution''' ) if ( "upsample" in name or "conv_before_upsample" in name or "conv_bicubic" in name or "conv_up" in name or "conv_hr" in name or "conv_last" in name or "aux" in name ): # heads if "conv_last" in name: _snake_case : int = name.replace('''conv_last''' , '''final_convolution''' ) if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]: if "conv_before_upsample.0" in name: _snake_case : List[Any] = name.replace('''conv_before_upsample.0''' , '''conv_before_upsample''' ) if "upsample.0" in name: _snake_case : int = name.replace('''upsample.0''' , '''upsample.convolution_0''' ) if "upsample.2" in name: _snake_case : List[str] = name.replace('''upsample.2''' , '''upsample.convolution_1''' ) _snake_case : List[Any] = '''upsample.''' + name elif config.upsampler == "pixelshuffledirect": _snake_case : Union[str, Any] = 
name.replace('''upsample.0.weight''' , '''upsample.conv.weight''' ) _snake_case : List[Any] = name.replace('''upsample.0.bias''' , '''upsample.conv.bias''' ) else: pass else: _snake_case : int = '''swin2sr.''' + name return name def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" for key in orig_state_dict.copy().keys(): _snake_case : Optional[int] = orig_state_dict.pop(lowerCAmelCase_ ) if "qkv" in key: _snake_case : List[Any] = key.split('''.''' ) _snake_case : str = int(key_split[1] ) _snake_case : List[str] = int(key_split[4] ) _snake_case : List[Any] = config.embed_dim if "weight" in key: _snake_case : Any = val[:dim, :] _snake_case : Any = val[dim : dim * 2, :] _snake_case : Any = val[-dim:, :] else: _snake_case : Any = val[:dim] _snake_case : Optional[Any] = val[dim : dim * 2] _snake_case : Optional[int] = val[-dim:] pass else: _snake_case : Dict = val return orig_state_dict def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Tuple = get_config(lowerCAmelCase_ ) _snake_case : Optional[Any] = SwinaSRForImageSuperResolution(lowerCAmelCase_ ) model.eval() _snake_case : Optional[int] = torch.hub.load_state_dict_from_url(lowerCAmelCase_ , map_location='''cpu''' ) _snake_case : int = convert_state_dict(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case , _snake_case : Tuple = model.load_state_dict(lowerCAmelCase_ , strict=lowerCAmelCase_ ) if len(lowerCAmelCase_ ) > 0: raise ValueError('''Missing keys when converting: {}'''.format(lowerCAmelCase_ ) ) for key in unexpected_keys: if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key): raise ValueError(f'''Unexpected key {key} in state_dict''' ) # verify values _snake_case : List[str] = '''https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true''' _snake_case : Union[str, Any] = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw ).convert('''RGB''' ) _snake_case 
: List[Any] = SwinaSRImageProcessor() # pixel_values = processor(image, return_tensors="pt").pixel_values _snake_case : Optional[Any] = 126 if '''Jpeg''' in checkpoint_url else 256 _snake_case : Tuple = Compose( [ Resize((image_size, image_size) ), ToTensor(), Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ), ] ) _snake_case : Union[str, Any] = transforms(lowerCAmelCase_ ).unsqueeze(0 ) if config.num_channels == 1: _snake_case : Optional[int] = pixel_values[:, 0, :, :].unsqueeze(1 ) _snake_case : Optional[int] = model(lowerCAmelCase_ ) # assert values if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url: _snake_case : Tuple = torch.Size([1, 3, 512, 512] ) _snake_case : Union[str, Any] = torch.tensor( [[-0.7_087, -0.7_138, -0.6_721], [-0.8_340, -0.8_095, -0.7_298], [-0.9_149, -0.8_414, -0.7_940]] ) elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: _snake_case : Dict = torch.Size([1, 3, 1_024, 1_024] ) _snake_case : Dict = torch.tensor( [[-0.7_775, -0.8_105, -0.8_933], [-0.7_764, -0.8_356, -0.9_225], [-0.7_976, -0.8_686, -0.9_579]] ) elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: # TODO values didn't match exactly here _snake_case : Tuple = torch.Size([1, 3, 1_024, 1_024] ) _snake_case : Any = torch.tensor( [[-0.8_035, -0.7_504, -0.7_491], [-0.8_538, -0.8_124, -0.7_782], [-0.8_804, -0.8_651, -0.8_493]] ) elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: _snake_case : List[str] = torch.Size([1, 3, 512, 512] ) _snake_case : Optional[Any] = torch.tensor( [[-0.7_669, -0.8_662, -0.8_767], [-0.8_810, -0.9_962, -0.9_820], [-0.9_340, -1.0_322, -1.1_149]] ) elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: _snake_case : Dict = torch.Size([1, 3, 1_024, 1_024] ) _snake_case : str = torch.tensor( [[-0.5_238, -0.5_557, -0.6_321], [-0.6_016, -0.5_903, -0.6_391], [-0.6_244, -0.6_334, -0.6_889]] ) assert ( outputs.reconstruction.shape == expected_shape ), f'''Shape of reconstruction should be {expected_shape}, but is 
{outputs.reconstruction.shape}''' assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , lowerCAmelCase_ , atol=1E-3 ) print('''Looks ok!''' ) _snake_case : Dict = { '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''': ( '''swin2SR-classical-sr-x2-64''' ), '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth''': ( '''swin2SR-classical-sr-x4-64''' ), '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth''': ( '''swin2SR-compressed-sr-x4-48''' ), '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth''': ( '''swin2SR-lightweight-x2-64''' ), '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth''': ( '''swin2SR-realworld-sr-x4-64-bsrgan-psnr''' ), } _snake_case : Optional[Any] = url_to_name[checkpoint_url] if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowerCAmelCase_ ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(lowerCAmelCase_ ) if push_to_hub: model.push_to_hub(f'''caidas/{model_name}''' ) processor.push_to_hub(f'''caidas/{model_name}''' ) if __name__ == "__main__": UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth', type=str, help='URL of the original Swin2SR checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' 
) parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the converted model to the hub.') UpperCAmelCase : Dict = parser.parse_args() convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
47
'''simple docstring''' import logging import re import pytorch_quantization import pytorch_quantization.nn as quant_nn import torch from pytorch_quantization import calib from pytorch_quantization.tensor_quant import QuantDescriptor UpperCAmelCase : str = logging.getLogger(__name__) UpperCAmelCase : Dict = 5_0 # max width of layer names UpperCAmelCase : Union[str, Any] = 7_0 # max width of quantizer names def _a ( lowerCAmelCase_ ): """simple docstring""" _snake_case : Dict = parser.add_argument_group('''quant_trainer arguments''' ) group.add_argument('''--wprec''' , type=lowerCAmelCase_ , default=8 , help='''weight precision''' ) group.add_argument('''--aprec''' , type=lowerCAmelCase_ , default=8 , help='''activation precision''' ) group.add_argument('''--quant-per-tensor''' , action='''store_true''' , help='''per tensor weight scaling''' ) group.add_argument('''--quant-disable''' , action='''store_true''' , help='''disable all quantizers''' ) group.add_argument('''--quant-disable-embeddings''' , action='''store_true''' , help='''disable all embeddings quantizers''' ) group.add_argument('''--quant-disable-keyword''' , type=lowerCAmelCase_ , nargs='''+''' , help='''disable quantizers by keyword''' ) group.add_argument('''--quant-disable-layer-module''' , type=lowerCAmelCase_ , help='''disable quantizers by keyword under layer.''' ) group.add_argument('''--quant-enable-layer-module''' , type=lowerCAmelCase_ , help='''enable quantizers by keyword under layer''' ) group.add_argument('''--calibrator''' , default='''max''' , help='''which quantization range calibrator to use''' ) group.add_argument('''--percentile''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , help='''percentile for PercentileCalibrator''' ) group.add_argument('''--fuse-qkv''' , action='''store_true''' , help='''use the same scale factor for qkv''' ) group.add_argument('''--clip-gelu''' , metavar='''N''' , type=lowerCAmelCase_ , help='''clip gelu output maximum value to N''' ) group.add_argument( 
'''--recalibrate-weights''' , action='''store_true''' , help=( '''recalibrate weight amaxes by taking the max of the weights.''' ''' amaxes will be computed with the current quantization granularity (axis).''' ) , ) def _a ( lowerCAmelCase_ ): """simple docstring""" if args.calibrator == "max": _snake_case : Optional[int] = '''max''' elif args.calibrator == "percentile": if args.percentile is None: raise ValueError('''Specify --percentile when using percentile calibrator''' ) _snake_case : Tuple = '''histogram''' elif args.calibrator == "mse": _snake_case : int = '''histogram''' else: raise ValueError(f'''Invalid calibrator {args.calibrator}''' ) _snake_case : Tuple = QuantDescriptor(num_bits=args.aprec , calib_method=lowerCAmelCase_ ) _snake_case : str = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) ) quant_nn.QuantLinear.set_default_quant_desc_input(lowerCAmelCase_ ) quant_nn.QuantLinear.set_default_quant_desc_weight(lowerCAmelCase_ ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_=False ): """simple docstring""" logger.info('''Configuring Model for Quantization''' ) logger.info(f'''using quantization package {pytorch_quantization.__file__}''' ) if not calib: if args.quant_disable_embeddings: set_quantizer_by_name(lowerCAmelCase_ , ['''embeddings'''] , which='''weight''' , _disabled=lowerCAmelCase_ ) if args.quant_disable: set_quantizer_by_name(lowerCAmelCase_ , [''''''] , _disabled=lowerCAmelCase_ ) if args.quant_disable_keyword: set_quantizer_by_name(lowerCAmelCase_ , args.quant_disable_keyword , _disabled=lowerCAmelCase_ ) if args.quant_disable_layer_module: set_quantizer_by_name(lowerCAmelCase_ , [R'''layer.\d+.''' + args.quant_disable_layer_module] , _disabled=lowerCAmelCase_ ) if args.quant_enable_layer_module: set_quantizer_by_name(lowerCAmelCase_ , [R'''layer.\d+.''' + args.quant_enable_layer_module] , _disabled=lowerCAmelCase_ ) if args.recalibrate_weights: 
recalibrate_weights(lowerCAmelCase_ ) if args.fuse_qkv: fuse_qkv(lowerCAmelCase_ , lowerCAmelCase_ ) if args.clip_gelu: clip_gelu(lowerCAmelCase_ , args.clip_gelu ) # if args.local_rank in [-1, 0] and not calib: print_quant_summary(lowerCAmelCase_ ) def _a ( lowerCAmelCase_ ): """simple docstring""" logger.info('''Enabling Calibration''' ) for name, module in model.named_modules(): if name.endswith('''_quantizer''' ): if module._calibrator is not None: module.disable_quant() module.enable_calib() else: module.disable() logger.info(f'''{name:80}: {module}''' ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" logger.info('''Loading calibrated amax''' ) for name, module in model.named_modules(): if name.endswith('''_quantizer''' ): if module._calibrator is not None: if isinstance(module._calibrator , calib.MaxCalibrator ): module.load_calib_amax() else: module.load_calib_amax('''percentile''' , percentile=args.percentile ) module.enable_quant() module.disable_calib() else: module.enable() model.cuda() print_quant_summary(lowerCAmelCase_ ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" def fusea(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): for mod in [qq, qk, qv]: if not hasattr(lowerCAmelCase_ , '''_amax''' ): print(''' WARNING: NO AMAX BUFFER''' ) return _snake_case : Tuple = qq._amax.detach().item() _snake_case : Tuple = qk._amax.detach().item() _snake_case : List[Any] = qv._amax.detach().item() _snake_case : List[str] = max(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) qq._amax.fill_(lowerCAmelCase_ ) qk._amax.fill_(lowerCAmelCase_ ) qv._amax.fill_(lowerCAmelCase_ ) logger.info(f''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' ) for name, mod in model.named_modules(): if name.endswith('''.attention.self''' ): logger.info(f'''FUSE_QKV: {name:{name_width}}''' ) fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer ) if args.quant_per_tensor: 
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" for name, mod in model.named_modules(): if name.endswith('''.output.dense''' ) and not name.endswith('''attention.output.dense''' ): _snake_case : List[Any] = mod._input_quantizer._amax.data.detach().item() mod._input_quantizer._amax.data.detach().clamp_(max=lowerCAmelCase_ ) _snake_case : List[str] = mod._input_quantizer._amax.data.detach().item() logger.info(f'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' ) def _a ( lowerCAmelCase_ ): """simple docstring""" for name, mod in model.named_modules(): if hasattr(lowerCAmelCase_ , '''_weight_quantizer''' ) and mod._weight_quantizer.axis is not None: _snake_case : Dict = mod.weight.shape[0] _snake_case : Optional[int] = mod._weight_quantizer._amax.detach() _snake_case : Optional[int] = torch.ones(lowerCAmelCase_ , dtype=amax.dtype , device=amax.device ) * amax print(f'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' ) def _a ( lowerCAmelCase_ ): """simple docstring""" for name, mod in model.named_modules(): if hasattr(lowerCAmelCase_ , '''_weight_quantizer''' ): if not hasattr(mod.weight_quantizer , '''_amax''' ): print('''RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER''' ) continue # determine which axes to reduce across # e.g. 
a 4D tensor quantized per axis 0 should reduce over (1,2,3) _snake_case : int = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis ) _snake_case : Dict = set(range(len(mod.weight.size() ) ) ) - axis_set _snake_case : Optional[int] = pytorch_quantization.utils.reduce_amax(mod.weight , axis=lowerCAmelCase_ , keepdims=lowerCAmelCase_ ).detach() logger.info(f'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' ) _snake_case : Tuple = amax def _a ( lowerCAmelCase_ , lowerCAmelCase_=25 , lowerCAmelCase_=180 , lowerCAmelCase_=None ): """simple docstring""" if ignore is None: _snake_case : Dict = [] elif not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case : Optional[int] = [ignore] _snake_case : str = 0 for name, mod in model.named_modules(): if not hasattr(lowerCAmelCase_ , '''weight''' ): continue _snake_case : Optional[int] = max(lowerCAmelCase_ , len(lowerCAmelCase_ ) ) for name, mod in model.named_modules(): _snake_case : Optional[Any] = getattr(lowerCAmelCase_ , '''_input_quantizer''' , lowerCAmelCase_ ) _snake_case : Tuple = getattr(lowerCAmelCase_ , '''_weight_quantizer''' , lowerCAmelCase_ ) if not hasattr(lowerCAmelCase_ , '''weight''' ): continue if type(lowerCAmelCase_ ) in ignore: continue if [True for s in ignore if type(lowerCAmelCase_ ) is str and s in name]: continue _snake_case : Optional[int] = f'''Act:{input_q.extra_repr()}''' _snake_case : Any = f'''Wgt:{weight_q.extra_repr()}''' _snake_case : Optional[int] = f'''{name:{name_width}} {act_str} {wgt_str}''' if len(lowerCAmelCase_ ) <= line_width: logger.info(lowerCAmelCase_ ) else: logger.info(f'''{name:{name_width}} {act_str}''' ) logger.info(f'''{" ":{name_width}} {wgt_str}''' ) def _a ( lowerCAmelCase_ ): """simple docstring""" _snake_case : str = 0 for name, mod in model.named_modules(): if isinstance(lowerCAmelCase_ , pytorch_quantization.nn.TensorQuantizer ): print(f'''{name:80} {mod}''' ) count += 1 
print(f'''{count} TensorQuantizers found in model''' ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) if quantizer_mod is not None: assert hasattr(lowerCAmelCase_ , lowerCAmelCase_ ) setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) else: logger.warning(f'''{name} has no {quantizer}''' ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="both" , **lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[Any] = f'''Warning: changing {which} quantizers of {name:{qname_width}}''' for k, v in kwargs.items(): s += f''' {k}={v}''' if which in ["input", "both"]: set_quantizer(lowerCAmelCase_ , lowerCAmelCase_ , '''_input_quantizer''' , lowerCAmelCase_ , lowerCAmelCase_ ) if which in ["weight", "both"]: set_quantizer(lowerCAmelCase_ , lowerCAmelCase_ , '''_weight_quantizer''' , lowerCAmelCase_ , lowerCAmelCase_ ) logger.info(lowerCAmelCase_ ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ): """simple docstring""" for name, mod in model.named_modules(): if hasattr(lowerCAmelCase_ , '''_input_quantizer''' ) or hasattr(lowerCAmelCase_ , '''_weight_quantizer''' ): for n in names: if re.search(lowerCAmelCase_ , lowerCAmelCase_ ): set_quantizers(lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ) elif name.endswith('''_quantizer''' ): for n in names: if re.search(lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case : Any = f'''Warning: changing {name:{name_width}}''' for k, v in kwargs.items(): s += f''' {k}={v}''' setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) logger.info(lowerCAmelCase_ )
47
1
'''FiLM-conditioned T5-style decoder stack (appears to mirror diffusers'
T5FilmDecoder — TODO confirm against upstream).

NOTE(review): this module shows signs of lossy renaming: many assignment
targets were collapsed to `_snake_case` while later lines still read the
original names (`batch`, `seq_length`, `inputs`, `hidden_states`, `scale`,
`shift`, ...), several `__init__` signatures declare the same parameter name
(`lowercase__`) multiple times, and base classes were mangled to `a__`.
As written the file does not compile; comments below describe the apparent
intent only.
'''
import math

import torch
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin


class lowerCamelCase (a__ , a__ ):
    # Top-level decoder: embeds the diffusion timestep, projects continuous
    # inputs, runs a stack of FiLM-conditioned decoder layers, then projects
    # back out to the spectrogram dimension.
    @register_to_config
    def __init__( self , lowercase__ = 128 , lowercase__ = 256 , lowercase__ = 2_000.0 , lowercase__ = 768 , lowercase__ = 12 , lowercase__ = 12 , lowercase__ = 64 , lowercase__ = 2_048 , lowercase__ = 0.1 , ) -> Optional[int]:
        """Build timestep-conditioning MLP, position embedding, input
        projection, decoder layers, final norm and output projection."""
        super().__init__()
        # Timestep conditioning MLP: d_model -> 4*d_model (SiLU twice, no bias).
        _snake_case : str = nn.Sequential(
            nn.Linear(lowercase__ , d_model * 4 , bias=lowercase__ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=lowercase__ ) , nn.SiLU() , )
        # Learned absolute position embedding.
        _snake_case : List[str] = nn.Embedding(lowercase__ , lowercase__ )
        _snake_case : Optional[int] = False
        # Projection of the continuous (spectrogram) inputs into d_model.
        _snake_case : str = nn.Linear(lowercase__ , lowercase__ , bias=lowercase__ )
        _snake_case : Union[str, Any] = nn.Dropout(p=lowercase__ )
        _snake_case : List[Any] = nn.ModuleList()
        for lyr_num in range(lowercase__ ):
            # FiLM conditional T5 decoder
            _snake_case : List[str] = DecoderLayer(d_model=lowercase__ , d_kv=lowercase__ , num_heads=lowercase__ , d_ff=lowercase__ , dropout_rate=lowercase__ )
            self.decoders.append(lowercase__ )
        _snake_case : int = TaLayerNorm(lowercase__ )
        _snake_case : int = nn.Dropout(p=lowercase__ )
        # Final projection back to the spectrogram feature dimension.
        _snake_case : List[str] = nn.Linear(lowercase__ , lowercase__ , bias=lowercase__ )

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Tuple:
        """Build an encoder-decoder attention mask as the outer product of the
        query and key masks; adds a broadcastable head dimension."""
        _snake_case : Union[str, Any] = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
        return mask.unsqueeze(-3 )

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ ) -> List[Any]:
        """Forward pass: (encodings_and_masks, decoder_input_tokens,
        decoder_noise_time) -> spectrogram output (apparent signature —
        TODO confirm, parameter names were lost)."""
        _snake_case , _snake_case , _snake_case : str = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        _snake_case : List[Any] = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
        _snake_case : Tuple = self.conditioning_emb(lowercase__ ).unsqueeze(1 )
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        _snake_case : Tuple = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        _snake_case : Any = torch.broadcast_to(
            torch.arange(lowercase__ , device=decoder_input_tokens.device ) , (batch, seq_length) , )
        _snake_case : int = self.position_encoding(lowercase__ )
        _snake_case : List[Any] = self.continuous_inputs_projection(lowercase__ )
        inputs += position_encodings
        _snake_case : int = self.dropout(lowercase__ )

        # decoder: No padding present.
        _snake_case : Any = torch.ones(
            decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )

        # Translate encoding masks to encoder-decoder masks.
        _snake_case : Optional[Any] = [(x, self.encoder_decoder_mask(lowercase__ , lowercase__ )) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        _snake_case : Optional[Any] = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
        _snake_case : str = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
        for lyr in self.decoders:
            _snake_case : List[Any] = lyr(
                lowercase__ , conditioning_emb=lowercase__ , encoder_hidden_states=lowercase__ , encoder_attention_mask=lowercase__ , )[0]
        _snake_case : Any = self.decoder_norm(lowercase__ )
        _snake_case : int = self.post_dropout(lowercase__ )
        _snake_case : Tuple = self.spec_out(lowercase__ )
        return spec_out


class lowerCamelCase (nn.Module ):
    # One decoder layer: conditioned self-attention, cross-attention,
    # FiLM-conditioned feed-forward — stored in self.layer in that order.
    def __init__( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__=1E-6 ) -> Optional[Any]:
        """Assemble the three sub-layers of one decoder block."""
        super().__init__()
        _snake_case : Dict = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=lowercase__ , d_kv=lowercase__ , num_heads=lowercase__ , dropout_rate=lowercase__ ) )

        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=lowercase__ , d_kv=lowercase__ , num_heads=lowercase__ , dropout_rate=lowercase__ , layer_norm_epsilon=lowercase__ , ) )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=lowercase__ , d_ff=lowercase__ , dropout_rate=lowercase__ , layer_norm_epsilon=lowercase__ ) )

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , ) -> List[str]:
        """Run self-attention, optional cross-attention, then the FiLM FF
        layer; returns a one-tuple of hidden states."""
        _snake_case : str = self.layer[0](
            lowercase__ , conditioning_emb=lowercase__ , attention_mask=lowercase__ , )
        if encoder_hidden_states is not None:
            # Convert the {0,1} encoder mask to an additive bias (0 / -1e10).
            _snake_case : List[str] = torch.where(encoder_attention_mask > 0 , 0 , -1E1_0 ).to(
                encoder_hidden_states.dtype )
            _snake_case : Any = self.layer[1](
                lowercase__ , key_value_states=lowercase__ , attention_mask=lowercase__ , )

        # Apply Film Conditional Feed Forward layer
        _snake_case : int = self.layer[-1](lowercase__ , lowercase__ )
        return (hidden_states,)


class lowerCamelCase (nn.Module ):
    # Self-attention sub-layer with optional FiLM conditioning on the
    # pre-attention layer norm output.
    def __init__( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Dict:
        """Layer norm + FiLM + attention + dropout."""
        super().__init__()
        _snake_case : int = TaLayerNorm(lowercase__ )
        _snake_case : Optional[int] = TaFiLMLayer(in_features=d_model * 4 , out_features=lowercase__ )
        _snake_case : Dict = Attention(query_dim=lowercase__ , heads=lowercase__ , dim_head=lowercase__ , out_bias=lowercase__ , scale_qk=lowercase__ )
        _snake_case : List[Any] = nn.Dropout(lowercase__ )

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__=None , lowercase__=None , ) -> Optional[Any]:
        """Pre-norm (optionally FiLM-modulated) self-attention with residual."""
        _snake_case : Any = self.layer_norm(lowercase__ )
        if conditioning_emb is not None:
            _snake_case : str = self.FiLMLayer(lowercase__ , lowercase__ )

        # Self-attention block
        _snake_case : Tuple = self.attention(lowercase__ )
        _snake_case : Optional[Any] = hidden_states + self.dropout(lowercase__ )
        return hidden_states


class lowerCamelCase (nn.Module ):
    # Cross-attention sub-layer (pre-norm, residual connection).
    def __init__( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Optional[Any]:
        """Attention + layer norm + dropout for cross-attention."""
        super().__init__()
        _snake_case : Optional[Any] = Attention(query_dim=lowercase__ , heads=lowercase__ , dim_head=lowercase__ , out_bias=lowercase__ , scale_qk=lowercase__ )
        _snake_case : Optional[Any] = TaLayerNorm(lowercase__ , eps=lowercase__ )
        _snake_case : Any = nn.Dropout(lowercase__ )

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__=None , lowercase__=None , ) -> Optional[Any]:
        """Pre-norm cross-attention over encoder states, residual added."""
        _snake_case : Union[str, Any] = self.layer_norm(lowercase__ )
        _snake_case : int = self.attention(
            lowercase__ , encoder_hidden_states=lowercase__ , attention_mask=attention_mask.squeeze(1 ) , )
        _snake_case : str = hidden_states + self.dropout(lowercase__ )
        return layer_output


class lowerCamelCase (nn.Module ):
    # FiLM-conditioned feed-forward sub-layer.
    def __init__( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Optional[Any]:
        """Gated dense block + FiLM + layer norm + dropout."""
        super().__init__()
        _snake_case : int = TaDenseGatedActDense(d_model=lowercase__ , d_ff=lowercase__ , dropout_rate=lowercase__ )
        _snake_case : List[str] = TaFiLMLayer(in_features=d_model * 4 , out_features=lowercase__ )
        _snake_case : Union[str, Any] = TaLayerNorm(lowercase__ , eps=lowercase__ )
        _snake_case : Optional[int] = nn.Dropout(lowercase__ )

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__=None ) -> List[Any]:
        """Pre-norm (optionally FiLM-modulated) feed-forward with residual."""
        _snake_case : Union[str, Any] = self.layer_norm(lowercase__ )
        if conditioning_emb is not None:
            _snake_case : Any = self.film(lowercase__ , lowercase__ )
        _snake_case : Tuple = self.DenseReluDense(lowercase__ )
        _snake_case : Any = hidden_states + self.dropout(lowercase__ )
        return hidden_states


class lowerCamelCase (nn.Module ):
    # T5-style gated-GELU dense block: act(wi_0(x)) * wi_1(x) -> dropout -> wo.
    def __init__( self , lowercase__ , lowercase__ , lowercase__ ) -> Dict:
        """Two input projections (gate + linear), output projection, dropout."""
        super().__init__()
        _snake_case : int = nn.Linear(lowercase__ , lowercase__ , bias=lowercase__ )
        _snake_case : Tuple = nn.Linear(lowercase__ , lowercase__ , bias=lowercase__ )
        _snake_case : Any = nn.Linear(lowercase__ , lowercase__ , bias=lowercase__ )
        _snake_case : str = nn.Dropout(lowercase__ )
        _snake_case : Dict = NewGELUActivation()

    def UpperCAmelCase_ ( self , lowercase__ ) -> Tuple:
        """Gated activation: GELU branch multiplied by linear branch."""
        # NOTE(review): both branches call `self.wi_a` — in the T5 original
        # these are two distinct projections (wi_0 / wi_1); looks like a name
        # collision from renaming — confirm.
        _snake_case : Tuple = self.act(self.wi_a(lowercase__ ) )
        _snake_case : Any = self.wi_a(lowercase__ )
        _snake_case : Tuple = hidden_gelu * hidden_linear
        _snake_case : Dict = self.dropout(lowercase__ )
        _snake_case : Dict = self.wo(lowercase__ )
        return hidden_states


class lowerCamelCase (nn.Module ):
    # T5-style RMS norm: no mean subtraction, no bias; scale-only weight.
    def __init__( self , lowercase__ , lowercase__=1E-6 ) -> Any:
        """Learnable scale initialized to ones; eps guards the rsqrt."""
        super().__init__()
        _snake_case : List[Any] = nn.Parameter(torch.ones(lowercase__ ) )
        _snake_case : Optional[Any] = eps

    def UpperCAmelCase_ ( self , lowercase__ ) -> Optional[Any]:
        """Normalize by root-mean-square computed in float32."""
        # Variance is computed in accumulated precision before rescaling.
        _snake_case : Optional[Any] = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=lowercase__ )
        _snake_case : str = hidden_states * torch.rsqrt(variance + self.variance_epsilon )

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.floataa, torch.bfloataa]:
            _snake_case : str = hidden_states.to(self.weight.dtype )
        return self.weight * hidden_states


class lowerCamelCase (nn.Module ):
    # Tanh approximation of GELU as used by Google BERT / GPT-2.
    def UpperCAmelCase_ ( self , lowercase__ ) -> torch.Tensor:
        """0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))."""
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(lowercase__ , 3.0 )) ))


class lowerCamelCase (nn.Module ):
    # FiLM layer: predicts per-feature (scale, shift) from the conditioning
    # embedding and applies x * (1 + scale) + shift.
    def __init__( self , lowercase__ , lowercase__ ) -> Any:
        """Single linear layer producing 2 * out_features (scale and shift)."""
        super().__init__()
        _snake_case : List[Any] = nn.Linear(lowercase__ , out_features * 2 , bias=lowercase__ )

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Tuple:
        """Split the projection into scale/shift and modulate x."""
        _snake_case : List[Any] = self.scale_bias(lowercase__ )
        _snake_case , _snake_case : Optional[Any] = torch.chunk(lowercase__ , 2 , -1 )
        _snake_case : Dict = x * (1 + scale) + shift
        return x
47
'''simple docstring''' from __future__ import annotations def _a ( lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ): """simple docstring""" if start is None: _snake_case : Optional[Any] = 0 if end is None: _snake_case : Any = len(lowerCAmelCase_ ) - 1 if start >= end: return _snake_case : Optional[Any] = (start + end) // 2 slowsort(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) slowsort(lowerCAmelCase_ , mid + 1 , lowerCAmelCase_ ) if sequence[end] < sequence[mid]: _snake_case , _snake_case : int = sequence[mid], sequence[end] slowsort(lowerCAmelCase_ , lowerCAmelCase_ , end - 1 ) if __name__ == "__main__": from doctest import testmod testmod()
47
1
'''Lazy-import package init for the BARTpho tokenizer.'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


# Lazy-import structure for this package.
# NOTE(review): the bare `UpperCAmelCase` bindings below look like lost
# assignment targets from renaming — `_import_structure` is referenced at the
# bottom but never bound under that name here; confirm against upstream.
UpperCAmelCase : Any = {}

try:
    # The slow tokenizer requires sentencepiece; skip registration when absent.
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase : List[Any] = ['BartphoTokenizer']

if TYPE_CHECKING:
    # Static type checkers see the real import.
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports
    # submodules only on first attribute access.
    UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
47
'''Slow integration test for the Flax mT5 model.'''
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
    from transformers.models.ta.modeling_flax_ta import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase (unittest.TestCase ):
    # Integration check: google/mt5-small must reproduce a known reference
    # cross-entropy-derived score on a tiny input pair.
    @slow
    def UpperCAmelCase_ ( self ) -> int:
        """Score "Hello there" -> "Hi I am" with mT5-small and compare to a
        precomputed reference value."""
        # NOTE(review): the `_snake_case` targets below look like lost variable
        # names (later lines read `model`, `tokenizer`, `labels`, `logits`,
        # `loss`, `mtf_score`, `EXPECTED_SCORE`) — confirm against the
        # upstream test before relying on this code.
        _snake_case : Tuple = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
        _snake_case : Any = AutoTokenizer.from_pretrained('''google/mt5-small''' )
        _snake_case : List[str] = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
        _snake_case : Dict = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
        # Teacher forcing: shift labels right to build decoder inputs.
        _snake_case : Any = shift_tokens_right(lowercase__ , model.config.pad_token_id , model.config.decoder_start_token_id )
        _snake_case : Any = model(lowercase__ , decoder_input_ids=lowercase__ ).logits
        _snake_case : Tuple = optax.softmax_cross_entropy(lowercase__ , onehot(lowercase__ , logits.shape[-1] ) ).mean()
        _snake_case : Tuple = -(labels.shape[-1] * loss.item())
        _snake_case : Union[str, Any] = -84.9_127
        # Allow tiny numerical drift between hardware/backends.
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
47
1
'''Image processor: shortest-edge resize (256) + center crop (224) + rescale
and ImageNet-standard normalization.

NOTE(review): this class shows lossy renaming — `__init__` and `preprocess`
declare the same parameter name (`lowercase__`) many times, and assignment
targets were collapsed to `_snake_case` while later lines read the original
names (`size`, `do_resize`, `images`, ...). As written it does not compile;
comments describe the apparent intent only.
'''
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


UpperCAmelCase : List[str] = logging.get_logger(__name__)


class lowerCamelCase (a__ ):
    # The only model input produced by this processor.
    _lowercase : str = ["""pixel_values"""]

    def __init__( self , lowercase__ = True , lowercase__ = None , lowercase__ = PILImageResampling.BILINEAR , lowercase__ = True , lowercase__ = None , lowercase__ = True , lowercase__ = 1 / 255 , lowercase__ = True , lowercase__ = None , lowercase__ = None , **lowercase__ , ) -> None:
        """Store preprocessing defaults: resize to shortest_edge=256, center
        crop 224x224, rescale by 1/255, normalize with ImageNet mean/std."""
        super().__init__(**lowercase__ )
        _snake_case : Optional[int] = size if size is not None else {'''shortest_edge''': 256}
        _snake_case : Union[str, Any] = get_size_dict(lowercase__ , default_to_square=lowercase__ )
        _snake_case : List[Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
        _snake_case : Union[str, Any] = get_size_dict(lowercase__ )
        _snake_case : int = do_resize
        _snake_case : Dict = size
        _snake_case : Optional[int] = resample
        _snake_case : Dict = do_center_crop
        _snake_case : Any = crop_size
        _snake_case : List[Any] = do_rescale
        _snake_case : List[str] = rescale_factor
        _snake_case : Union[str, Any] = do_normalize
        _snake_case : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        _snake_case : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ = PILImageResampling.BICUBIC , lowercase__ = None , **lowercase__ , ) -> np.ndarray:
        """Resize so the image's shortest edge equals size['shortest_edge'],
        preserving aspect ratio."""
        _snake_case : Dict = get_size_dict(lowercase__ , default_to_square=lowercase__ )
        if "shortest_edge" not in size:
            raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
        _snake_case : Optional[int] = get_resize_output_image_size(lowercase__ , size=size['''shortest_edge'''] , default_to_square=lowercase__ )
        return resize(lowercase__ , size=lowercase__ , resample=lowercase__ , data_format=lowercase__ , **lowercase__ )

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ) -> np.ndarray:
        """Center-crop the image to size['height'] x size['width']."""
        _snake_case : List[Any] = get_size_dict(lowercase__ )
        return center_crop(lowercase__ , size=(size['''height'''], size['''width''']) , data_format=lowercase__ , **lowercase__ )

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ ) -> np.ndarray:
        """Scale pixel values by `scale` (typically 1/255)."""
        return rescale(lowercase__ , scale=lowercase__ , data_format=lowercase__ , **lowercase__ )

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ) -> np.ndarray:
        """Normalize with the given per-channel mean and std."""
        return normalize(lowercase__ , mean=lowercase__ , std=lowercase__ , data_format=lowercase__ , **lowercase__ )

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = ChannelDimension.FIRST , **lowercase__ , ) -> Union[str, Any]:
        """Full preprocessing pipeline: resolve per-call overrides against the
        stored defaults, validate, then resize / crop / rescale / normalize
        and return a BatchFeature with `pixel_values`."""
        # Per-call arguments override the defaults captured in __init__.
        _snake_case : List[str] = do_resize if do_resize is not None else self.do_resize
        _snake_case : int = size if size is not None else self.size
        _snake_case : Optional[int] = get_size_dict(lowercase__ , default_to_square=lowercase__ )
        _snake_case : List[str] = resample if resample is not None else self.resample
        _snake_case : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
        _snake_case : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
        _snake_case : Dict = get_size_dict(lowercase__ )
        _snake_case : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
        _snake_case : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
        _snake_case : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
        _snake_case : Optional[Any] = image_mean if image_mean is not None else self.image_mean
        _snake_case : Optional[int] = image_std if image_std is not None else self.image_std
        # Accept a single image or a batch; always process a list.
        _snake_case : Optional[int] = make_list_of_images(lowercase__ )
        if not valid_images(lowercase__ ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )

        # All transformations expect numpy arrays.
        _snake_case : Union[str, Any] = [to_numpy_array(lowercase__ ) for image in images]
        if do_resize:
            _snake_case : Dict = [self.resize(image=lowercase__ , size=lowercase__ , resample=lowercase__ ) for image in images]
        if do_center_crop:
            _snake_case : Any = [self.center_crop(image=lowercase__ , size=lowercase__ ) for image in images]
        if do_rescale:
            _snake_case : int = [self.rescale(image=lowercase__ , scale=lowercase__ ) for image in images]
        if do_normalize:
            _snake_case : Any = [self.normalize(image=lowercase__ , mean=lowercase__ , std=lowercase__ ) for image in images]
        # Emit channels-first (or requested) layout for the model.
        _snake_case : List[Any] = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images]
        _snake_case : int = {'''pixel_values''': images}
        return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
47
'''Regression test: optimizers prepared by `accelerate` must stay picklable.'''
import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class lowerCamelCase (unittest.TestCase ):
    # `Accelerator.prepare` wraps the optimizer; this round-trips it through
    # pickle to catch wrappers that break serialization.
    def UpperCAmelCase_ ( self ) -> Union[str, Any]:
        """Pickle and unpickle an accelerator-prepared SGD optimizer."""
        # NOTE(review): later lines read `model.parameters()` and
        # `accelerator.prepare` — the `_snake_case` targets look like lost
        # variable names; confirm against the upstream test.
        _snake_case : Any = torch.nn.Linear(10 , 10 )
        _snake_case : Optional[int] = torch.optim.SGD(model.parameters() , 0.1 )
        _snake_case : List[str] = Accelerator()
        _snake_case : Optional[Any] = accelerator.prepare(lowercase__ )
        try:
            pickle.loads(pickle.dumps(lowercase__ ) )
        except Exception as e:
            self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
        # Reset global accelerator state so later tests start clean.
        AcceleratorState._reset_state()
47
1
'''Lazy-import package init for XLNet (config, tokenizers, PT and TF models).'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy-import structure for the XLNet package.
# NOTE(review): the repeated bare `UpperCAmelCase` bindings below look like
# lost assignment targets (originally keys of `_import_structure`);
# `_import_structure` is referenced at the bottom but never bound under that
# name here — confirm against upstream.
UpperCAmelCase : Dict = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}

try:
    # Slow tokenizer needs sentencepiece.
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase : List[Any] = ['XLNetTokenizer']

try:
    # Fast tokenizer needs the tokenizers library.
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase : int = ['XLNetTokenizerFast']

try:
    # PyTorch model classes.
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase : List[Any] = [
        'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
        'XLNetForMultipleChoice',
        'XLNetForQuestionAnswering',
        'XLNetForQuestionAnsweringSimple',
        'XLNetForSequenceClassification',
        'XLNetForTokenClassification',
        'XLNetLMHeadModel',
        'XLNetModel',
        'XLNetPreTrainedModel',
        'load_tf_weights_in_xlnet',
    ]

try:
    # TensorFlow model classes.
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase : Dict = [
        'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFXLNetForMultipleChoice',
        'TFXLNetForQuestionAnsweringSimple',
        'TFXLNetForSequenceClassification',
        'TFXLNetForTokenClassification',
        'TFXLNetLMHeadModel',
        'TFXLNetMainLayer',
        'TFXLNetModel',
        'TFXLNetPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet import XLNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet_fast import XLNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlnet import (
            XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLNetForMultipleChoice,
            XLNetForQuestionAnswering,
            XLNetForQuestionAnsweringSimple,
            XLNetForSequenceClassification,
            XLNetForTokenClassification,
            XLNetLMHeadModel,
            XLNetModel,
            XLNetPreTrainedModel,
            load_tf_weights_in_xlnet,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlnet import (
            TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLNetForMultipleChoice,
            TFXLNetForQuestionAnsweringSimple,
            TFXLNetForSequenceClassification,
            TFXLNetForTokenClassification,
            TFXLNetLMHeadModel,
            TFXLNetMainLayer,
            TFXLNetModel,
            TFXLNetPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports
    # submodules only on first attribute access.
    UpperCAmelCase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
47
'''simple docstring''' UpperCAmelCase : Union[str, Any] = tuple[float, float, float] UpperCAmelCase : int = tuple[float, float, float] def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : str = end_pointa[0] - end_pointa[0] _snake_case : Tuple = end_pointa[1] - end_pointa[1] _snake_case : Any = end_pointa[2] - end_pointa[2] return (x, y, z) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Dict = ab[1] * ac[2] - ab[2] * ac[1] # *i _snake_case : List[str] = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j _snake_case : Optional[int] = ab[0] * ac[1] - ab[1] * ac[0] # *k return (x, y, z) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" return tuple(round(lowerCAmelCase_ , lowerCAmelCase_ ) for x in vector ) == (0, 0, 0) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 10 ): """simple docstring""" _snake_case : str = create_vector(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Tuple = create_vector(lowerCAmelCase_ , lowerCAmelCase_ ) return is_zero_vector(get_ad_vectors_cross(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ )
47
1
'''A single-variable polynomial with real coefficients.'''
from __future__ import annotations

from collections.abc import MutableSequence


class Polynomial:
    """Polynomial where coefficients[i] is the coefficient of x**i.

    BUGFIX notes: the previous version had (a) duplicate ``__init__``
    parameter names (a SyntaxError), (b) three methods all named
    ``UpperCAmelCase_`` so only the last survived class creation,
    (c) ``__str__`` referencing an undefined name instead of the exponent
    ``i``, and (d) derivative/integral loops that assigned to a throwaway
    local instead of the coefficient list. Internal references already used
    the name ``Polynomial``, so the class is restored under that name with a
    backward-compatible alias below.
    """

    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """Store `degree` and a defensive copy of `coefficients`.

        Raises:
            ValueError: if len(coefficients) != degree + 1.
        """
        if len(coefficients) != degree + 1:
            raise ValueError(
                '''The number of coefficients should be equal to the degree + 1.'''
            )
        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_a: Polynomial) -> Polynomial:
        """Return the sum of two polynomials (degree = max of the two)."""
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)

    def __sub__(self, polynomial_a: Polynomial) -> Polynomial:
        """Return self - other, implemented as self + other * (-1)."""
        return self + polynomial_a * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        """Return the polynomial with every coefficient negated."""
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_a: Polynomial) -> Polynomial:
        """Return the product (degree = sum of degrees), by convolution."""
        coefficients: list[float] = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        """Evaluate the polynomial at x = `substitution`."""
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        """Human-readable form, highest power first; zero terms are skipped."""
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                # BUGFIX: the exponent is the loop index `i` (was an
                # undefined name, a guaranteed NameError).
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        """Return the first derivative d/dx of this polynomial."""
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            # BUGFIX: write into the coefficient list (was a no-op local).
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        """Return the antiderivative, with `constant` as the x**0 term."""
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            # BUGFIX: write into the coefficient list (was a no-op local).
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_a: object) -> bool:
        """Two polynomials are equal iff degree and all coefficients match."""
        if not isinstance(polynomial_a, Polynomial):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_a: object) -> bool:
        return not self.__eq__(polynomial_a)

    # Backward-compatible alias: in the previous (broken) class body the last
    # method bound to this name was the integral.
    UpperCAmelCase_ = integral


# Backward-compatible alias for the previous (mangled) class name.
lowerCamelCase = Polynomial
47
'''simple docstring''' import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel UpperCAmelCase : List[str] = logging.getLogger(__name__) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" if os.path.exists(lowerCAmelCase_ ): if os.path.exists(os.path.join(lowerCAmelCase_ , '''config.json''' ) ) and os.path.isfile( os.path.join(lowerCAmelCase_ , '''config.json''' ) ): os.remove(os.path.join(lowerCAmelCase_ , '''config.json''' ) ) if os.path.exists(os.path.join(lowerCAmelCase_ , '''pytorch_model.bin''' ) ) and os.path.isfile( os.path.join(lowerCAmelCase_ , '''pytorch_model.bin''' ) ): os.remove(os.path.join(lowerCAmelCase_ , '''pytorch_model.bin''' ) ) else: os.makedirs(lowerCAmelCase_ ) model.save_pretrained(lowerCAmelCase_ ) def _a ( lowerCAmelCase_ , lowerCAmelCase_=False ): """simple docstring""" _snake_case : Optional[Any] = 2 if unlogit: _snake_case : Any = torch.pow(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Union[str, Any] = p * torch.log(lowerCAmelCase_ ) _snake_case : Optional[Any] = 0 return -plogp.sum(dim=-1 ) def _a ( lowerCAmelCase_ ): """simple docstring""" logger.info('''lv, h >\t''' + '''\t'''.join(f'''{x + 1}''' for x in range(len(lowerCAmelCase_ ) ) ) ) for row in range(len(lowerCAmelCase_ ) ): if tensor.dtype != torch.long: logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:.5f}''' for x in tensor[row].cpu().data ) ) else: logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:d}''' for x in tensor[row].cpu().data ) ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=None , lowerCAmelCase_=False ): """simple docstring""" _snake_case , _snake_case : Optional[int] = model.config.num_hidden_layers, model.config.num_attention_heads 
_snake_case : Tuple = torch.zeros(lowerCAmelCase_ , lowerCAmelCase_ ).to(args.device ) _snake_case : Union[str, Any] = torch.zeros(lowerCAmelCase_ , lowerCAmelCase_ ).to(args.device ) if head_mask is None: _snake_case : int = torch.ones(lowerCAmelCase_ , lowerCAmelCase_ ).to(args.device ) head_mask.requires_grad_(requires_grad=lowerCAmelCase_ ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: _snake_case : Dict = None _snake_case : Dict = 0.0 _snake_case : Optional[int] = 0.0 for step, inputs in enumerate(tqdm(lowerCAmelCase_ , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ): _snake_case : List[Any] = tuple(t.to(args.device ) for t in inputs ) ((_snake_case) , ) : Optional[Any] = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) _snake_case : Any = model(lowerCAmelCase_ , labels=lowerCAmelCase_ , head_mask=lowerCAmelCase_ ) # (loss), lm_logits, presents, (all hidden_states), (attentions) _snake_case , _snake_case , _snake_case : List[Any] = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(lowerCAmelCase_ ): _snake_case : Union[str, Any] = entropy(attn.detach() , lowerCAmelCase_ ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(lowerCAmelCase_ ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: _snake_case : Any = 2 _snake_case : List[str] = torch.pow(torch.pow(lowerCAmelCase_ , lowerCAmelCase_ ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 
# ---- tail of `compute_heads_importance` (its `def` line lies above this chunk) ----
# NOTE(review): identifiers in this file were machine-mangled: every assignment
# target became `_snake_case`, most call arguments became `lowerCAmelCase_`, and
# helper names were scrambled (`print_ad_tensor` ~ presumably print_2d_tensor,
# `GPTaLMHeadModel` ~ presumably GPT2LMHeadModel, `np.intaa` ~ a numpy integer
# dtype — TODO confirm against the unmangled upstream script). Because of the
# mangling, later reads of names such as `loss`, `new_head_mask`, `parser` or
# `model` refer to bindings that are never created here; the code is kept
# byte-identical and only comments/docstrings were added or improved.
    1E-20  # NOTE(review): orphaned remainder of an expression cut at the chunk boundary
    # Optionally rescale all importance scores into [0, 1] globally.
    if not args.dont_normalize_global_importance:
        _snake_case : Optional[int] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info('''Attention entropies''' )
        print_ad_tensor(lowerCAmelCase_ )
    if compute_importance:
        logger.info('''Head importance scores''' )
        print_ad_tensor(lowerCAmelCase_ )
    # Rank heads by importance for display.
    logger.info('''Head ranked by importance scores''' )
    _snake_case : str = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
    _snake_case : List[Any] = torch.arange( head_importance.numel() , device=args.device )
    _snake_case : List[Any] = head_ranks.view_as(lowerCAmelCase_ )
    print_ad_tensor(lowerCAmelCase_ )
    return attn_entropy, head_importance, total_loss


def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
    """Iteratively zero out the least-important attention heads until the score
    drops below ``args.masking_threshold`` x the original score, then save and
    return the resulting head mask (unmangled original name: ``mask_heads``)."""
    _snake_case , _snake_case , _snake_case : str = compute_heads_importance(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , compute_entropy=lowerCAmelCase_ )
    _snake_case : Optional[Any] = 1 / loss  # instead of downsteam score use the LM loss
    logger.info('''Pruning: original score: %f, threshold: %f''' , lowerCAmelCase_ , original_score * args.masking_threshold )
    _snake_case : int = torch.ones_like(lowerCAmelCase_ )
    # Number of heads masked per iteration (at least one).
    _snake_case : Optional[Any] = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
    _snake_case : int = original_score
    while current_score >= original_score * args.masking_threshold:
        _snake_case : int = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        _snake_case : Dict = float('''Inf''' )
        _snake_case : Optional[Any] = head_importance.view(-1 ).sort()[1]
        if len(lowerCAmelCase_ ) <= num_to_mask:
            print('''BREAK BY num_to_mask''' )
            break
        # mask heads
        _snake_case : Union[str, Any] = current_heads_to_mask[:num_to_mask]
        logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) )
        _snake_case : Tuple = new_head_mask.view(-1 )
        _snake_case : List[str] = 0.0
        _snake_case : str = new_head_mask.view_as(lowerCAmelCase_ )
        _snake_case : Dict = new_head_mask.clone().detach()
        print_ad_tensor(lowerCAmelCase_ )
        # Compute metric and head importance again
        _snake_case , _snake_case , _snake_case : Any = compute_heads_importance(
            lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , compute_entropy=lowerCAmelCase_ , head_mask=lowerCAmelCase_ )
        _snake_case : int = 1 / loss
        logger.info(
            '''Masking: current score: %f, remaining heads %d (%.1f percents)''' ,
            lowerCAmelCase_ ,
            new_head_mask.sum() ,
            new_head_mask.sum() / new_head_mask.numel() * 100 ,
        )
    logger.info('''Final head mask''' )
    print_ad_tensor(lowerCAmelCase_ )
    np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() )
    return head_mask


def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
    """Physically prune the heads zeroed in ``head_mask``, then re-score the
    model and log parameter-count and speed ratios before vs after pruning
    (unmangled original name: ``prune_heads``).
    NOTE(review): rebinds the same mangled name `_a` as the function above."""
    # Score the masked (but not yet pruned) model and time it.
    _snake_case : Optional[Any] = datetime.now()
    _snake_case , _snake_case , _snake_case : Union[str, Any] = compute_heads_importance(
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , compute_entropy=lowerCAmelCase_ , compute_importance=lowerCAmelCase_ , head_mask=lowerCAmelCase_ )
    _snake_case : Tuple = 1 / loss
    _snake_case : Dict = datetime.now() - before_time
    _snake_case : List[Any] = sum(p.numel() for p in model.parameters() )
    # layer index -> list of head indices whose mask entry is 0.
    _snake_case : int = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(lowerCAmelCase_ ) )
    }
    for k, v in heads_to_prune.items():
        if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
            # A single pruned head collapses to a scalar via squeeze(); re-wrap it.
            _snake_case : Union[str, Any] = [
                v,
            ]
    assert sum(len(lowerCAmelCase_ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
    model.prune_heads(lowerCAmelCase_ )
    _snake_case : List[str] = sum(p.numel() for p in model.parameters() )
    # Score again with the heads actually removed and time it.
    _snake_case : int = datetime.now()
    _snake_case , _snake_case , _snake_case : Optional[Any] = compute_heads_importance(
        lowerCAmelCase_ ,
        lowerCAmelCase_ ,
        lowerCAmelCase_ ,
        compute_entropy=lowerCAmelCase_ ,
        compute_importance=lowerCAmelCase_ ,
        head_mask=lowerCAmelCase_ ,
        actually_pruned=lowerCAmelCase_ ,
    )
    _snake_case : Optional[int] = 1 / loss
    _snake_case : Dict = datetime.now() - before_time
    logger.info(
        '''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' ,
        lowerCAmelCase_ ,
        lowerCAmelCase_ ,
        pruned_num_params / original_num_params * 100 ,
    )
    logger.info('''Pruning: score with masking: %f score with pruning: %f''' , lowerCAmelCase_ , lowerCAmelCase_ )
    logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 100 )
    save_model(lowerCAmelCase_ , args.output_dir )


def _a ( ):
    """CLI entry point (unmangled original name: ``main``): parse arguments, set
    up device/distributed/logging, load a GPT-2 LM-head model, compute head
    importance and optionally run masking + pruning.
    NOTE(review): rebinds the mangled name `_a` yet again."""
    _snake_case : List[str] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--data_dir''' ,
        default=lowerCAmelCase_ ,
        type=lowerCAmelCase_ ,
        required=lowerCAmelCase_ ,
        help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''' ,
    )
    parser.add_argument(
        '''--model_name_or_path''' ,
        default=lowerCAmelCase_ ,
        type=lowerCAmelCase_ ,
        required=lowerCAmelCase_ ,
        help='''Path to pretrained model or model identifier from huggingface.co/models''' ,
    )
    parser.add_argument(
        '''--output_dir''' ,
        default=lowerCAmelCase_ ,
        type=lowerCAmelCase_ ,
        required=lowerCAmelCase_ ,
        help='''The output directory where the model predictions and checkpoints will be written.''' ,
    )
    # Other parameters
    parser.add_argument(
        '''--config_name''' ,
        default='''''' ,
        type=lowerCAmelCase_ ,
        help='''Pretrained config name or path if not the same as model_name_or_path''' ,
    )
    parser.add_argument(
        '''--tokenizer_name''' ,
        default='''''' ,
        type=lowerCAmelCase_ ,
        help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' ,
    )
    parser.add_argument(
        '''--cache_dir''' ,
        default=lowerCAmelCase_ ,
        type=lowerCAmelCase_ ,
        help='''Where do you want to store the pre-trained models downloaded from s3''' ,
    )
    parser.add_argument(
        '''--data_subset''' , type=lowerCAmelCase_ , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' )
    parser.add_argument(
        '''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' )
    parser.add_argument(
        '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
    parser.add_argument(
        '''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' )
    parser.add_argument(
        '''--dont_normalize_global_importance''' ,
        action='''store_true''' ,
        help='''Don\'t normalize all importance scores between 0 and 1''' ,
    )
    parser.add_argument(
        '''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' )
    parser.add_argument(
        '''--masking_threshold''' ,
        default=0.9 ,
        type=lowerCAmelCase_ ,
        help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' ,
    )
    parser.add_argument(
        '''--masking_amount''' , default=0.1 , type=lowerCAmelCase_ , help='''Amount to heads to masking at each masking step.''' )
    parser.add_argument('''--metric_name''' , default='''acc''' , type=lowerCAmelCase_ , help='''Metric to use for head masking.''' )
    parser.add_argument(
        '''--max_seq_length''' ,
        default=128 ,
        type=lowerCAmelCase_ ,
        help=(
            '''The maximum total input sequence length after WordPiece tokenization. \n'''
            '''Sequences longer than this will be truncated, sequences shorter padded.'''
        ) ,
    )
    parser.add_argument('''--batch_size''' , default=1 , type=lowerCAmelCase_ , help='''Batch size.''' )
    parser.add_argument('''--seed''' , type=lowerCAmelCase_ , default=42 )
    parser.add_argument('''--local_rank''' , type=lowerCAmelCase_ , default=-1 , help='''local_rank for distributed training on gpus''' )
    parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' )
    parser.add_argument('''--server_ip''' , type=lowerCAmelCase_ , default='''''' , help='''Can be used for distant debugging.''' )
    parser.add_argument('''--server_port''' , type=lowerCAmelCase_ , default='''''' , help='''Can be used for distant debugging.''' )
    _snake_case : Optional[Any] = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print('''Waiting for debugger attach''' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=lowerCAmelCase_ )
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        _snake_case : str = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' )
        _snake_case : Optional[Any] = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank )
        _snake_case : List[str] = torch.device('''cuda''' , args.local_rank )
        _snake_case : int = 1
        torch.distributed.init_process_group(backend='''nccl''' )  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
    logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )

    _snake_case : Optional[Any] = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )

    # Distributed and parallel training
    model.to(args.device )
    if args.local_rank != -1:
        _snake_case : Optional[int] = nn.parallel.DistributedDataParallel(
            lowerCAmelCase_ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=lowerCAmelCase_ )
    elif args.n_gpu > 1:
        _snake_case : List[Any] = nn.DataParallel(lowerCAmelCase_ )

    # Print/save training arguments
    os.makedirs(args.output_dir , exist_ok=lowerCAmelCase_ )
    torch.save(lowerCAmelCase_ , os.path.join(args.output_dir , '''run_args.bin''' ) )
    logger.info('''Training/evaluation parameters %s''' , lowerCAmelCase_ )

    # Prepare dataset
    _snake_case : Dict = np.concatenate(
        [
            np.loadtxt(args.data_dir , dtype=np.intaa ),
        ] )
    _snake_case : int = (torch.from_numpy(lowerCAmelCase_ ),)
    _snake_case : Tuple = TensorDataset(*lowerCAmelCase_ )
    _snake_case : List[str] = RandomSampler(lowerCAmelCase_ )
    _snake_case : Dict = DataLoader(lowerCAmelCase_ , sampler=lowerCAmelCase_ , batch_size=args.batch_size )

    # Compute head entropy and importance score
    compute_heads_importance(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        _snake_case : Optional[int] = mask_heads(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
        prune_heads(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )


if __name__ == "__main__":
    # NOTE(review): `main` was mangled to `_a` above, so this call raises
    # NameError as written.
    main()
47
1
'''Falcon model configuration (identifiers machine-mangled: `lowerCamelCase` appears to be FalconConfig, `a__` its PretrainedConfig base — TODO confirm against upstream transformers).'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


# Module-level logger (mangled name).
UpperCAmelCase : Tuple = logging.get_logger(__name__)

# Checkpoint name -> hosted config URL.
# NOTE(review): rebinds the same mangled name as the logger above, clobbering it.
UpperCAmelCase : List[Any] = {
    'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
    'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}


class lowerCamelCase (a__ ):
    """Configuration for Falcon models: stores vocabulary/hidden sizes, attention
    layout flags (`alibi`, `multi_query`, `new_decoder_architecture`) and token ids."""

    # NOTE(review): both class attributes were mangled to the same name
    # `_lowercase`, so the list assignment shadows the string one.
    _lowercase : Optional[int] = """falcon"""
    _lowercase : int = ["""past_key_values"""]

    def __init__( self , lowercase__=65_024 , lowercase__=4_544 , lowercase__=32 , lowercase__=71 , lowercase__=1E-5 , lowercase__=0.02 , lowercase__=True , lowercase__=0.0 , lowercase__=0.0 , lowercase__=None , lowercase__=False , lowercase__=False , lowercase__=True , lowercase__=True , lowercase__=False , lowercase__=11 , lowercase__=11 , **lowercase__ , ) -> int:
        """Build the config; forwards bos/eos token ids and remaining kwargs to the base class."""
        # NOTE(review): every positional parameter was mangled to the same name
        # `lowercase__` (a SyntaxError in real Python), and the body reads the
        # original parameter names (`vocab_size`, `hidden_size`, ...) that no
        # longer exist. Kept byte-identical; this block is not runnable as-is.
        _snake_case : Optional[Any] = vocab_size
        # Backward compatibility with n_embed kwarg
        _snake_case : Union[str, Any] = kwargs.pop('''n_embed''' , lowercase__ )
        _snake_case : str = hidden_size if n_embed is None else n_embed
        _snake_case : Union[str, Any] = num_hidden_layers
        _snake_case : Union[str, Any] = num_attention_heads
        _snake_case : int = layer_norm_epsilon
        _snake_case : Optional[int] = initializer_range
        _snake_case : Optional[Any] = use_cache
        _snake_case : Optional[int] = hidden_dropout
        _snake_case : List[Any] = attention_dropout
        _snake_case : Dict = bos_token_id
        _snake_case : List[str] = eos_token_id
        # Key/value head count defaults to the full attention head count.
        _snake_case : Any = num_attention_heads if num_kv_heads is None else num_kv_heads
        _snake_case : Dict = alibi
        _snake_case : List[Any] = new_decoder_architecture
        _snake_case : Optional[int] = multi_query  # Ignored when new_decoder_architecture is True
        _snake_case : List[Any] = parallel_attn
        _snake_case : Dict = bias
        super().__init__(bos_token_id=lowercase__ , eos_token_id=lowercase__ , **lowercase__ )

    @property
    def UpperCAmelCase_ ( self ) -> Any:
        """Per-head dimensionality: hidden_size // num_attention_heads."""
        return self.hidden_size // self.num_attention_heads

    @property
    def UpperCAmelCase_ ( self ) -> Tuple:
        """Negation of `self.alibi` (unmangled original presumably named `rotary`).
        NOTE(review): shadows the property above — same mangled name."""
        return not self.alibi
47
'''simple docstring''' def _a ( lowerCAmelCase_ ): """simple docstring""" if n == 1 or not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): return 0 elif n == 2: return 1 else: _snake_case : Union[str, Any] = [0, 1] for i in range(2 , n + 1 ): sequence.append(sequence[i - 1] + sequence[i - 2] ) return sequence[n] def _a ( lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[int] = 0 _snake_case : int = 2 while digits < n: index += 1 _snake_case : Tuple = len(str(fibonacci(lowerCAmelCase_ ) ) ) return index def _a ( lowerCAmelCase_ = 1_000 ): """simple docstring""" return fibonacci_digits_index(lowerCAmelCase_ ) if __name__ == "__main__": print(solution(int(str(input()).strip())))
47
1
'''Tests for StableDiffusionUpscalePipeline (identifiers machine-mangled: both
test classes became `lowerCamelCase`, every test method `UpperCAmelCase_`, all
assignment targets `_snake_case` — so later reads of e.g. `unet`, `sd_pipe`,
`image` reference names never bound here; code kept byte-identical).'''
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

enable_full_determinism()


class lowerCamelCase (unittest.TestCase ):
    """Fast (CPU-capable) unit tests built on tiny dummy sub-models."""

    def UpperCAmelCase_ ( self ) -> Dict:
        """Teardown: free Python and CUDA memory between tests (mangled from `tearDown`)."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def UpperCAmelCase_ ( self ) -> Optional[Any]:
        """Deterministic dummy 1x3x32x32 image tensor (original presumably `dummy_image`)."""
        _snake_case : Optional[Any] = 1
        _snake_case : Union[str, Any] = 3
        _snake_case : List[Any] = (32, 32)
        _snake_case : Dict = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowercase__ )
        return image

    @property
    def UpperCAmelCase_ ( self ) -> Tuple:
        """Tiny seeded conditional UNet (original presumably `dummy_cond_unet_upscale`); shadows the property above (same mangled name)."""
        torch.manual_seed(0 )
        _snake_case : List[str] = UNetaDConditionModel(
            block_out_channels=(32, 32, 64) ,
            layers_per_block=2 ,
            sample_size=32 ,
            in_channels=7 ,
            out_channels=4 ,
            down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''', '''CrossAttnDownBlock2D''') ,
            up_block_types=('''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''', '''UpBlock2D''') ,
            cross_attention_dim=32 ,
            attention_head_dim=8 ,
            use_linear_projection=lowercase__ ,
            only_cross_attention=(True, True, False) ,
            num_class_embeds=100 ,
        )
        return model

    @property
    def UpperCAmelCase_ ( self ) -> List[str]:
        """Tiny seeded VAE (original presumably `dummy_vae`)."""
        torch.manual_seed(0 )
        _snake_case : Any = AutoencoderKL(
            block_out_channels=[32, 32, 64] ,
            in_channels=3 ,
            out_channels=3 ,
            down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,
            up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,
            latent_channels=4 ,
        )
        return model

    @property
    def UpperCAmelCase_ ( self ) -> List[str]:
        """Tiny seeded CLIP text encoder (original presumably `dummy_text_encoder`)."""
        torch.manual_seed(0 )
        _snake_case : Union[str, Any] = CLIPTextConfig(
            bos_token_id=0 ,
            eos_token_id=2 ,
            hidden_size=32 ,
            intermediate_size=37 ,
            layer_norm_eps=1E-0_5 ,
            num_attention_heads=4 ,
            num_hidden_layers=5 ,
            pad_token_id=1 ,
            vocab_size=1_000 ,
            hidden_act='''gelu''' ,
            projection_dim=512 ,
        )
        return CLIPTextModel(lowercase__ )

    def UpperCAmelCase_ ( self ) -> Any:
        """End-to-end fast test: pipeline output matches a pinned slice and the
        tuple-return path matches the dict-return path."""
        _snake_case : List[str] = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        _snake_case : Union[str, Any] = self.dummy_cond_unet_upscale
        _snake_case : List[Any] = DDPMScheduler()
        _snake_case : Dict = DDIMScheduler(prediction_type='''v_prediction''' )
        _snake_case : Union[str, Any] = self.dummy_vae
        _snake_case : Optional[Any] = self.dummy_text_encoder
        _snake_case : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        _snake_case : int = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _snake_case : List[str] = Image.fromarray(np.uinta(lowercase__ ) ).convert('''RGB''' ).resize((64, 64) )
        # make sure here that pndm scheduler skips prk
        _snake_case : int = StableDiffusionUpscalePipeline(
            unet=lowercase__ ,
            low_res_scheduler=lowercase__ ,
            scheduler=lowercase__ ,
            vae=lowercase__ ,
            text_encoder=lowercase__ ,
            tokenizer=lowercase__ ,
            max_noise_level=350 ,
        )
        _snake_case : Tuple = sd_pipe.to(lowercase__ )
        sd_pipe.set_progress_bar_config(disable=lowercase__ )
        _snake_case : Union[str, Any] = '''A painting of a squirrel eating a burger'''
        _snake_case : Any = torch.Generator(device=lowercase__ ).manual_seed(0 )
        _snake_case : int = sd_pipe(
            [prompt] ,
            image=lowercase__ ,
            generator=lowercase__ ,
            guidance_scale=6.0 ,
            noise_level=20 ,
            num_inference_steps=2 ,
            output_type='''np''' ,
        )
        _snake_case : Dict = output.images
        _snake_case : Optional[int] = torch.Generator(device=lowercase__ ).manual_seed(0 )
        _snake_case : List[str] = sd_pipe(
            [prompt] ,
            image=lowercase__ ,
            generator=lowercase__ ,
            guidance_scale=6.0 ,
            noise_level=20 ,
            num_inference_steps=2 ,
            output_type='''np''' ,
            return_dict=lowercase__ ,
        )[0]
        _snake_case : Optional[Any] = image[0, -3:, -3:, -1]
        _snake_case : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
        # The upscaler quadruples the low-res image side length.
        _snake_case : Dict = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        _snake_case : int = np.array([0.3_113, 0.3_910, 0.4_272, 0.4_859, 0.5_061, 0.4_652, 0.5_362, 0.5_715, 0.5_661] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2

    def UpperCAmelCase_ ( self ) -> str:
        """Batched prompts and num_images_per_prompt both produce 2 images."""
        _snake_case : Any = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        _snake_case : Dict = self.dummy_cond_unet_upscale
        _snake_case : Any = DDPMScheduler()
        _snake_case : List[str] = DDIMScheduler(prediction_type='''v_prediction''' )
        _snake_case : Optional[Any] = self.dummy_vae
        _snake_case : List[str] = self.dummy_text_encoder
        _snake_case : Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        _snake_case : str = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _snake_case : Optional[Any] = Image.fromarray(np.uinta(lowercase__ ) ).convert('''RGB''' ).resize((64, 64) )
        # make sure here that pndm scheduler skips prk
        _snake_case : str = StableDiffusionUpscalePipeline(
            unet=lowercase__ ,
            low_res_scheduler=lowercase__ ,
            scheduler=lowercase__ ,
            vae=lowercase__ ,
            text_encoder=lowercase__ ,
            tokenizer=lowercase__ ,
            max_noise_level=350 ,
        )
        _snake_case : List[Any] = sd_pipe.to(lowercase__ )
        sd_pipe.set_progress_bar_config(disable=lowercase__ )
        _snake_case : str = '''A painting of a squirrel eating a burger'''
        _snake_case : Optional[Any] = sd_pipe(
            2 * [prompt] ,
            image=2 * [low_res_image] ,
            guidance_scale=6.0 ,
            noise_level=20 ,
            num_inference_steps=2 ,
            output_type='''np''' ,
        )
        _snake_case : Any = output.images
        assert image.shape[0] == 2
        _snake_case : str = torch.Generator(device=lowercase__ ).manual_seed(0 )
        _snake_case : Optional[Any] = sd_pipe(
            [prompt] ,
            image=lowercase__ ,
            generator=lowercase__ ,
            num_images_per_prompt=2 ,
            guidance_scale=6.0 ,
            noise_level=20 ,
            num_inference_steps=2 ,
            output_type='''np''' ,
        )
        _snake_case : str = output.images
        assert image.shape[0] == 2

    @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
    def UpperCAmelCase_ ( self ) -> List[str]:
        """fp16 smoke test on GPU (VAE kept in fp32); checks only the output shape."""
        _snake_case : str = self.dummy_cond_unet_upscale
        _snake_case : Dict = DDPMScheduler()
        _snake_case : int = DDIMScheduler(prediction_type='''v_prediction''' )
        _snake_case : Dict = self.dummy_vae
        _snake_case : Optional[int] = self.dummy_text_encoder
        _snake_case : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        _snake_case : Dict = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _snake_case : Dict = Image.fromarray(np.uinta(lowercase__ ) ).convert('''RGB''' ).resize((64, 64) )
        # put models in fp16, except vae as it overflows in fp16
        _snake_case : Tuple = unet.half()
        _snake_case : int = text_encoder.half()
        # make sure here that pndm scheduler skips prk
        _snake_case : Optional[Any] = StableDiffusionUpscalePipeline(
            unet=lowercase__ ,
            low_res_scheduler=lowercase__ ,
            scheduler=lowercase__ ,
            vae=lowercase__ ,
            text_encoder=lowercase__ ,
            tokenizer=lowercase__ ,
            max_noise_level=350 ,
        )
        _snake_case : List[Any] = sd_pipe.to(lowercase__ )
        sd_pipe.set_progress_bar_config(disable=lowercase__ )
        _snake_case : List[Any] = '''A painting of a squirrel eating a burger'''
        _snake_case : int = torch.manual_seed(0 )
        _snake_case : Tuple = sd_pipe(
            [prompt] ,
            image=lowercase__ ,
            generator=lowercase__ ,
            num_inference_steps=2 ,
            output_type='''np''' ,
        ).images
        _snake_case : List[Any] = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)


@slow
@require_torch_gpu
class lowerCamelCase (unittest.TestCase ):
    """Slow GPU integration tests against the hosted x4-upscaler checkpoint."""

    def UpperCAmelCase_ ( self ) -> Any:
        """Teardown: free Python and CUDA memory between tests (mangled from `tearDown`)."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCAmelCase_ ( self ) -> List[str]:
        """fp32 output matches a reference numpy image within 1e-3."""
        _snake_case : Optional[Any] = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-upscale/low_res_cat.png''' )
        _snake_case : Optional[Any] = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
            '''/upsampled_cat.npy''' )
        _snake_case : Union[str, Any] = '''stabilityai/stable-diffusion-x4-upscaler'''
        _snake_case : Tuple = StableDiffusionUpscalePipeline.from_pretrained(lowercase__ )
        pipe.to(lowercase__ )
        pipe.set_progress_bar_config(disable=lowercase__ )
        pipe.enable_attention_slicing()
        _snake_case : Union[str, Any] = '''a cat sitting on a park bench'''
        _snake_case : Any = torch.manual_seed(0 )
        _snake_case : Tuple = pipe(
            prompt=lowercase__ ,
            image=lowercase__ ,
            generator=lowercase__ ,
            output_type='''np''' ,
        )
        _snake_case : Optional[Any] = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 1E-3

    def UpperCAmelCase_ ( self ) -> str:
        """fp16 output matches its (looser) reference within 5e-1."""
        _snake_case : str = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-upscale/low_res_cat.png''' )
        _snake_case : Optional[Any] = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
            '''/upsampled_cat_fp16.npy''' )
        _snake_case : int = '''stabilityai/stable-diffusion-x4-upscaler'''
        _snake_case : Union[str, Any] = StableDiffusionUpscalePipeline.from_pretrained(
            lowercase__ ,
            torch_dtype=torch.floataa ,
        )
        pipe.to(lowercase__ )
        pipe.set_progress_bar_config(disable=lowercase__ )
        pipe.enable_attention_slicing()
        _snake_case : List[str] = '''a cat sitting on a park bench'''
        _snake_case : List[str] = torch.manual_seed(0 )
        _snake_case : str = pipe(
            prompt=lowercase__ ,
            image=lowercase__ ,
            generator=lowercase__ ,
            output_type='''np''' ,
        )
        _snake_case : Optional[int] = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 5E-1

    def UpperCAmelCase_ ( self ) -> Optional[Any]:
        """With attention slicing + sequential CPU offload, peak GPU memory stays below 2.9 GB."""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        _snake_case : List[Any] = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-upscale/low_res_cat.png''' )
        _snake_case : Optional[int] = '''stabilityai/stable-diffusion-x4-upscaler'''
        _snake_case : Union[str, Any] = StableDiffusionUpscalePipeline.from_pretrained(
            lowercase__ ,
            torch_dtype=torch.floataa ,
        )
        pipe.to(lowercase__ )
        pipe.set_progress_bar_config(disable=lowercase__ )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        _snake_case : str = '''a cat sitting on a park bench'''
        _snake_case : Tuple = torch.manual_seed(0 )
        _snake_case : List[str] = pipe(
            prompt=lowercase__ ,
            image=lowercase__ ,
            generator=lowercase__ ,
            num_inference_steps=5 ,
            output_type='''np''' ,
        )
        _snake_case : Any = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
47
'''LRU cache built on a dict plus an explicit doubly linked list (identifiers
machine-mangled: all three classes became `lowerCamelCase`, methods became
`UpperCAmelCase_`, assignment targets `_snake_case` — so later reads of e.g.
`key`, `node`, `capacity` reference names never bound here; code kept
byte-identical, only comments/docstrings changed).'''
from __future__ import annotations

from collections.abc import Callable
from typing import Generic, TypeVar

# NOTE(review): both TypeVars were mangled to the same target name, so the
# second assignment clobbers the first; the annotations below refer to `T`/`U`,
# which are never actually defined in this mangled version.
UpperCAmelCase : Any = TypeVar('T')
UpperCAmelCase : str = TypeVar('U')


class lowerCamelCase (Generic[T, U] ):
    """List node (original presumably `DoubleLinkedListNode`): key/value pair plus prev/next links."""

    def __init__( self , lowercase__ , lowercase__ ) -> List[Any]:
        """Store the key/value and start unlinked (prev/next are None)."""
        _snake_case : str = key
        _snake_case : Optional[int] = val
        _snake_case : DoubleLinkedListNode[T, U] | None = None
        _snake_case : DoubleLinkedListNode[T, U] | None = None

    def __repr__( self ) -> str:
        """Debug string showing key, value and which links are set."""
        return (
            F'''Node: key: {self.key}, val: {self.val}, '''
            F'''has next: {bool(self.next )}, has prev: {bool(self.prev )}'''
        )


class lowerCamelCase (Generic[T, U] ):
    """Doubly linked list with sentinel head/rear nodes (original presumably
    `DoubleLinkedList`); shadows the node class above — same mangled name."""

    def __init__( self ) -> None:
        """Create an empty list: two sentinel nodes wired to each other."""
        _snake_case : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(lowercase__ , lowercase__ )
        _snake_case : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(lowercase__ , lowercase__ )
        _snake_case , _snake_case : Union[str, Any] = self.rear, self.head

    def __repr__( self ) -> str:
        """Render the list front-to-back, one node per line."""
        _snake_case : List[Any] = ['''DoubleLinkedList''']
        _snake_case : str = self.head
        while node.next is not None:
            rep.append(str(lowercase__ ) )
            _snake_case : List[str] = node.next
        rep.append(str(self.rear ) )
        return ",\n ".join(lowercase__ )

    def UpperCAmelCase_ ( self , lowercase__ ) -> None:
        """Insert a node just before the rear sentinel, i.e. at the
        most-recently-used position (original presumably `add`)."""
        _snake_case : Tuple = self.rear.prev

        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None

        _snake_case : Union[str, Any] = node
        _snake_case : Optional[Any] = previous
        _snake_case : int = node
        _snake_case : Union[str, Any] = self.rear

    def UpperCAmelCase_ ( self , lowercase__ ) -> DoubleLinkedListNode[T, U] | None:
        """Unlink and return the node, or None when it is not linked (original
        presumably `remove`); shadows the method above — same mangled name."""
        if node.prev is None or node.next is None:
            return None

        _snake_case : Optional[int] = node.next
        _snake_case : Any = node.prev
        _snake_case : List[str] = None
        _snake_case : Optional[int] = None
        return node


class lowerCamelCase (Generic[T, U] ):
    """LRU cache (original presumably `LRUCache`): dict for O(1) lookup plus the
    linked list above for recency order; shadows the classes above — same mangled name."""

    # Class-level registry: decorated function -> its cache instance.
    _lowercase : dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__( self , lowercase__ ) -> Union[str, Any]:
        """Create an empty cache with the given capacity; hit/miss/size counters start at zero."""
        _snake_case : DoubleLinkedList[T, U] = DoubleLinkedList()
        _snake_case : Union[str, Any] = capacity
        _snake_case : int = 0
        _snake_case : Dict = 0
        _snake_case : Union[str, Any] = 0
        _snake_case : dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__( self ) -> str:
        """Summary of hits/misses/capacity/current size."""
        return (
            F'''CacheInfo(hits={self.hits}, misses={self.miss}, '''
            F'''capacity={self.capacity}, current size={self.num_keys})'''
        )

    def __contains__( self , lowercase__ ) -> bool:
        """Membership test against the backing dict (does not update recency)."""
        return key in self.cache

    def UpperCAmelCase_ ( self , lowercase__ ) -> U | None:
        """Return the cached value, bumping the entry to most-recent, or None on
        a miss (original presumably `get`)."""
        if key in self.cache:
            self.hits += 1
            _snake_case : DoubleLinkedListNode[T, U] = self.cache[key]
            _snake_case : Tuple = self.list.remove(self.cache[key] )
            assert node == value_node

            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(lowercase__ )
            return node.val
        self.miss += 1
        return None

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> None:
        """Insert or update a key, evicting the least-recently-used entry when at
        capacity (original presumably `put`); shadows `get` above — same mangled name."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                _snake_case : Dict = self.list.head.next

                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(lowercase__ ) is not None
                )  # node guaranteed to be in list
                assert node.key is not None

                del self.cache[first_node.key]
                self.num_keys -= 1
            _snake_case : Optional[int] = DoubleLinkedListNode(lowercase__ , lowercase__ )
            self.list.add(self.cache[key] )
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            _snake_case : Optional[Any] = self.list.remove(self.cache[key] )
            assert node is not None  # node guaranteed to be in list
            _snake_case : Optional[Any] = value
            self.list.add(lowercase__ )

    @classmethod
    def UpperCAmelCase_ ( cls , lowercase__ = 128 ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        """Decorator factory: memoise a single-argument function with a per-function
        LRU cache of the given size (original presumably `decorator`)."""

        def cache_decorator_inner(lowercase__ ) -> Callable[..., U]:
            def cache_decorator_wrapper(*lowercase__ ) -> U:
                # Lazily create one cache per decorated function.
                if func not in cls.decorator_function_to_instance_map:
                    _snake_case : Optional[Any] = LRUCache(lowercase__ )

                _snake_case : Union[str, Any] = cls.decorator_function_to_instance_map[func].get(args[0] )
                if result is None:
                    # Miss: compute and store the value.
                    _snake_case : Tuple = func(*lowercase__ )
                    cls.decorator_function_to_instance_map[func].put(args[0] , lowercase__ )
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(lowercase__ , '''cache_info''' , lowercase__ )  # noqa: B010

            return cache_decorator_wrapper

        return cache_decorator_inner


if __name__ == "__main__":
    import doctest

    doctest.testmod()
47
1
'''Tests for Nezha models (identifiers machine-mangled: the tester class and the
test class both became `lowerCamelCase`, methods `UpperCAmelCase_`, parameters
`lowercase__`, assignment targets `_snake_case` — so later reads of names like
`model`, `result`, `config_and_inputs` reference bindings never created here,
and the parameter lists repeat the same name, a SyntaxError in real Python.
Code kept byte-identical; only comments/docstrings changed. The chunk is cut
mid-class at the end: the test-case class continues beyond this file view.'''
import os
import tempfile
import unittest

from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        NezhaForMaskedLM,
        NezhaForMultipleChoice,
        NezhaForNextSentencePrediction,
        NezhaForPreTraining,
        NezhaForQuestionAnswering,
        NezhaForSequenceClassification,
        NezhaForTokenClassification,
        NezhaModel,
    )
    from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST


class lowerCamelCase :
    """Helper that builds tiny Nezha configs/inputs and runs per-head checks
    (original presumably `NezhaModelTester`)."""

    def __init__( self , lowercase__ , lowercase__=13 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=99 , lowercase__=32 , lowercase__=5 , lowercase__=4 , lowercase__=37 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=128 , lowercase__=32 , lowercase__=16 , lowercase__=2 , lowercase__=0.02 , lowercase__=3 , lowercase__=4 , lowercase__=None , ) -> str:
        """Record the tiny-model hyperparameters used by every check below."""
        _snake_case : Tuple = parent
        _snake_case : Union[str, Any] = batch_size
        _snake_case : List[Any] = seq_length
        _snake_case : str = is_training
        _snake_case : Union[str, Any] = use_input_mask
        _snake_case : Optional[int] = use_token_type_ids
        _snake_case : List[Any] = use_labels
        _snake_case : str = vocab_size
        _snake_case : Union[str, Any] = hidden_size
        _snake_case : Dict = num_hidden_layers
        _snake_case : List[str] = num_attention_heads
        _snake_case : Dict = intermediate_size
        _snake_case : Union[str, Any] = hidden_act
        _snake_case : List[str] = hidden_dropout_prob
        _snake_case : str = attention_probs_dropout_prob
        _snake_case : Any = max_position_embeddings
        _snake_case : List[str] = type_vocab_size
        _snake_case : Dict = type_sequence_label_size
        _snake_case : List[str] = initializer_range
        _snake_case : int = num_labels
        _snake_case : Optional[int] = num_choices
        _snake_case : Dict = scope

    def UpperCAmelCase_ ( self ) -> Tuple:
        """Build random ids/masks/labels plus a config (original presumably `prepare_config_and_inputs`)."""
        _snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        _snake_case : Optional[int] = None
        if self.use_input_mask:
            _snake_case : int = random_attention_mask([self.batch_size, self.seq_length] )

        _snake_case : int = None
        if self.use_token_type_ids:
            _snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        _snake_case : Optional[Any] = None
        _snake_case : Any = None
        _snake_case : List[str] = None
        if self.use_labels:
            _snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            _snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            _snake_case : str = ids_tensor([self.batch_size] , self.num_choices )

        _snake_case : List[str] = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def UpperCAmelCase_ ( self ) -> Optional[Any]:
        """Tiny NezhaConfig from the recorded hyperparameters (original presumably `get_config`);
        NOTE(review): shadows the method above — same mangled name, as do all methods below."""
        return NezhaConfig(
            vocab_size=self.vocab_size ,
            hidden_size=self.hidden_size ,
            num_hidden_layers=self.num_hidden_layers ,
            num_attention_heads=self.num_attention_heads ,
            intermediate_size=self.intermediate_size ,
            hidden_act=self.hidden_act ,
            hidden_dropout_prob=self.hidden_dropout_prob ,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob ,
            max_position_embeddings=self.max_position_embeddings ,
            type_vocab_size=self.type_vocab_size ,
            is_decoder=lowercase__ ,
            initializer_range=self.initializer_range ,
        )

    def UpperCAmelCase_ ( self ) -> Any:
        """Same as above plus encoder hidden states/mask for decoder mode
        (original presumably `prepare_config_and_inputs_for_decoder`)."""
        (
            (
                _snake_case
            ) , (
                _snake_case
            ) , (
                _snake_case
            ) , (
                _snake_case
            ) , (
                _snake_case
            ) , (
                _snake_case
            ) , (
                _snake_case
            ) ,
        ) : List[Any] = self.prepare_config_and_inputs()
        _snake_case : Optional[int] = True
        _snake_case : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        _snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Tuple:
        """Check base NezhaModel output shapes with/without masks and token types."""
        _snake_case : List[str] = NezhaModel(config=lowercase__ )
        model.to(lowercase__ )
        model.eval()
        _snake_case : Dict = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ )
        _snake_case : str = model(lowercase__ , token_type_ids=lowercase__ )
        _snake_case : Union[str, Any] = model(lowercase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ) -> Union[str, Any]:
        """Check NezhaModel in decoder mode with cross-attention inputs."""
        _snake_case : Optional[Any] = True
        _snake_case : Optional[Any] = NezhaModel(lowercase__ )
        model.to(lowercase__ )
        model.eval()
        _snake_case : Union[str, Any] = model(
            lowercase__ ,
            attention_mask=lowercase__ ,
            token_type_ids=lowercase__ ,
            encoder_hidden_states=lowercase__ ,
            encoder_attention_mask=lowercase__ ,
        )
        _snake_case : Any = model(
            lowercase__ ,
            attention_mask=lowercase__ ,
            token_type_ids=lowercase__ ,
            encoder_hidden_states=lowercase__ ,
        )
        _snake_case : int = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> int:
        """Check NezhaForMaskedLM logits shape."""
        _snake_case : Dict = NezhaForMaskedLM(config=lowercase__ )
        model.to(lowercase__ )
        model.eval()
        _snake_case : int = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> int:
        """Check NezhaForNextSentencePrediction logits shape (binary)."""
        _snake_case : Optional[Any] = NezhaForNextSentencePrediction(config=lowercase__ )
        model.to(lowercase__ )
        model.eval()
        _snake_case : Union[str, Any] = model(
            lowercase__ ,
            attention_mask=lowercase__ ,
            token_type_ids=lowercase__ ,
            labels=lowercase__ ,
        )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Union[str, Any]:
        """Check NezhaForPreTraining MLM + NSP head output shapes."""
        _snake_case : str = NezhaForPreTraining(config=lowercase__ )
        model.to(lowercase__ )
        model.eval()
        _snake_case : Dict = model(
            lowercase__ ,
            attention_mask=lowercase__ ,
            token_type_ids=lowercase__ ,
            labels=lowercase__ ,
            next_sentence_label=lowercase__ ,
        )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> int:
        """Check NezhaForQuestionAnswering start/end logits shapes."""
        _snake_case : str = NezhaForQuestionAnswering(config=lowercase__ )
        model.to(lowercase__ )
        model.eval()
        _snake_case : Optional[int] = model(
            lowercase__ ,
            attention_mask=lowercase__ ,
            token_type_ids=lowercase__ ,
            start_positions=lowercase__ ,
            end_positions=lowercase__ ,
        )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> List[Any]:
        """Check NezhaForSequenceClassification logits shape."""
        _snake_case : List[Any] = self.num_labels
        _snake_case : str = NezhaForSequenceClassification(lowercase__ )
        model.to(lowercase__ )
        model.eval()
        _snake_case : int = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Any:
        """Check NezhaForTokenClassification logits shape."""
        _snake_case : Union[str, Any] = self.num_labels
        _snake_case : Any = NezhaForTokenClassification(config=lowercase__ )
        model.to(lowercase__ )
        model.eval()
        _snake_case : Optional[int] = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Optional[int]:
        """Check NezhaForMultipleChoice logits shape after expanding inputs per choice."""
        _snake_case : Dict = self.num_choices
        _snake_case : Tuple = NezhaForMultipleChoice(config=lowercase__ )
        model.to(lowercase__ )
        model.eval()
        _snake_case : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        _snake_case : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        _snake_case : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        _snake_case : Dict = model(
            lowercase__ ,
            attention_mask=lowercase__ ,
            token_type_ids=lowercase__ ,
            labels=lowercase__ ,
        )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def UpperCAmelCase_ ( self ) -> Optional[int]:
        """Repackage prepared inputs into the common dict form used by the mixin
        (original presumably `prepare_config_and_inputs_for_common`)."""
        _snake_case : List[Any] = self.prepare_config_and_inputs()
        (
            (
                _snake_case
            ) , (
                _snake_case
            ) , (
                _snake_case
            ) , (
                _snake_case
            ) , (
                _snake_case
            ) , (
                _snake_case
            ) , (
                _snake_case
            ) ,
        ) : int = config_and_inputs
        _snake_case : str = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict


@require_torch
class lowerCamelCase (a__ , a__ , a__ , unittest.TestCase ):
    """Nezha test case (mixin bases mangled to `a__`); shadows the tester class
    above — same mangled name. NOTE(review): cut off mid-method at the end of
    this chunk; the class body continues past the visible file."""

    _lowercase : Optional[int] = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    # NOTE(review): the three class attributes all target `_lowercase`, so each
    # assignment shadows the previous one.
    _lowercase : Dict = (
        {
            """feature-extraction""": NezhaModel,
            """fill-mask""": NezhaForMaskedLM,
            """question-answering""": NezhaForQuestionAnswering,
            """text-classification""": NezhaForSequenceClassification,
            """token-classification""": NezhaForTokenClassification,
            """zero-shot""": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    _lowercase : Any = True

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__=False ) -> Dict:
        """Add zero labels for pretraining models when return_labels is requested
        (original presumably `_prepare_for_class`)."""
        _snake_case : Union[str, Any] = super()._prepare_for_class(lowercase__ , lowercase__ , return_labels=lowercase__ )

        if return_labels:
            if model_class in get_values(lowercase__ ):
                _snake_case : Optional[Any] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowercase__ )
                _snake_case : Union[str, Any] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=lowercase__ )
        return inputs_dict

    def UpperCAmelCase_ ( self ) -> Optional[Any]:
        """Set-up (original presumably `setUp`); NOTE(review): `NezhaModelTester`
        was mangled to `lowerCamelCase` above, so this name is unresolved here.
        The method body continues beyond this chunk."""
        _snake_case : str = NezhaModelTester(self )
_snake_case : List[str] = ConfigTester(self , config_class=lowercase__ , hidden_size=37 ) def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" _snake_case : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase__ ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: """simple docstring""" _snake_case : str = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*lowercase__ ) def UpperCAmelCase_ ( self ) -> Dict: """simple docstring""" ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder() _snake_case : Optional[Any] = None self.model_tester.create_and_check_model_as_decoder( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ) def UpperCAmelCase_ ( self ) -> int: """simple docstring""" _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowercase__ ) def UpperCAmelCase_ ( self ) -> int: """simple docstring""" _snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*lowercase__ ) def UpperCAmelCase_ ( self ) -> Tuple: """simple docstring""" _snake_case : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*lowercase__ ) def UpperCAmelCase_ ( self ) -> List[Any]: """simple docstring""" _snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*lowercase__ ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: """simple 
docstring""" _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowercase__ ) def UpperCAmelCase_ ( self ) -> List[Any]: """simple docstring""" _snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowercase__ ) def UpperCAmelCase_ ( self ) -> Any: """simple docstring""" _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> int: """simple docstring""" for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : Any = NezhaModel.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) @slow @require_torch_gpu def UpperCAmelCase_ ( self ) -> int: """simple docstring""" _snake_case , _snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # NezhaForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == NezhaForMultipleChoice: return _snake_case : Dict = True _snake_case : Tuple = model_class(config=lowercase__ ) _snake_case : Dict = self._prepare_for_class(lowercase__ , lowercase__ ) _snake_case : Dict = torch.jit.trace( lowercase__ , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(lowercase__ , os.path.join(lowercase__ , '''bert.pt''' ) ) _snake_case : Optional[int] = torch.jit.load(os.path.join(lowercase__ , '''bert.pt''' ) , map_location=lowercase__ ) loaded(inputs_dict['''input_ids'''].to(lowercase__ ) , inputs_dict['''attention_mask'''].to(lowercase__ ) ) @require_torch class lowerCamelCase (unittest.TestCase ): @slow def UpperCAmelCase_ ( self ) -> Union[str, Any]: """simple docstring""" _snake_case : Optional[int] = NezhaModel.from_pretrained('''sijunhe/nezha-cn-base''' ) _snake_case : str = torch.tensor([[0, 1, 2, 3, 4, 5]] ) _snake_case : List[str] = torch.tensor([[0, 1, 1, 1, 1, 1]] ) with torch.no_grad(): _snake_case : Any = model(lowercase__ , attention_mask=lowercase__ )[0] _snake_case : Optional[int] = torch.Size((1, 6, 768) ) self.assertEqual(output.shape , lowercase__ ) _snake_case : Optional[Any] = torch.tensor([[[0.0_685, 0.2_441, 0.1_102], [0.0_600, 0.1_906, 0.1_349], [0.0_221, 0.0_819, 0.0_586]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowercase__ , atol=1E-4 ) ) @slow def UpperCAmelCase_ ( self ) -> List[str]: """simple docstring""" _snake_case : str = NezhaForMaskedLM.from_pretrained('''sijunhe/nezha-cn-base''' ) _snake_case : str = torch.tensor([[0, 1, 2, 3, 4, 5]] ) _snake_case : List[Any] = torch.tensor([[1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): _snake_case : Any = model(lowercase__ , attention_mask=lowercase__ )[0] _snake_case : int = torch.Size((1, 6, 21_128) ) self.assertEqual(output.shape , lowercase__ ) _snake_case : Tuple = torch.tensor( [[-2.7_939, -1.7_902, -2.2_189], [-2.8_585, -1.8_908, 
-2.3_723], [-2.6_499, -1.7_750, -2.2_558]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowercase__ , atol=1E-4 ) )
47
"""Deduplicate byte-identical initializer tensors in an ONNX model.

Exported models often contain duplicated initializers (e.g. tied weights the
exporter copied).  This module rewrites every reference to a duplicate so it
points at a single shared initializer, drops the copies, and saves the
slimmed model next to the original as ``optimized_<name>``.
"""
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    """Return True if two TensorProtos are identical, ignoring their names.

    The ``name`` fields are blanked before the protobuf ``==`` comparison and
    restored afterwards, so the inputs are left unmodified.
    """
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    """Replace every input of *node_proto* equal to *name* with *new_name*.

    Recurses into the subgraphs carried by control-flow nodes so references
    inside If/Loop bodies are rewritten as well.
    """
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            # insert-then-pop keeps the input at the same position.
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        # An If node holds two subgraphs: then-branch and else-branch.
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        # A Loop node holds a single body subgraph.
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    """Apply :func:`_node_replace_input_with` to every node of *graph_proto*."""
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    """Remove duplicate initializers and retarget their uses.

    Args:
        model: reference model used only for name sanity checks.
        model_without_ext: model that is actually modified in place.
        ind_to_replace: pairs ``(i, ref_i)`` with ``i > ref_i`` — initializer
            ``i`` is a duplicate of ``ref_i`` and will be removed.
    """
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])
        # Every node input that referenced the duplicate now uses the kept one.
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """Load *onnx_file_path*, merge duplicate initializers, save and return
    the path of the optimized model (``optimized_`` prefix, same directory).

    Prints the estimated memory saved; dtype codes 1 (float32) and 6 (int32)
    count 4 bytes/element, 7 (int64) and 11 (double) count 8.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                # (j, i): j is the duplicate to drop, i the one to keep.
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1_024 / 1_024 / 1_024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)
    return new_model


# Backward-compatible alias: the mangled original exposed the entry point as `_a`.
_a = remove_dup_initializers
47
1
"""SentencePiece-based (slow) tokenizer for T5."""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer


if TYPE_CHECKING:
    from ...tokenization_utils_base import TextInput

from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    }
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}

SPIECE_UNDERLINE = "▁"


class lowerCamelCase(PreTrainedTokenizer):
    """T5 tokenizer backed by a SentencePiece model.

    The last ``extra_ids`` entries of the vocabulary are reserved sentinel
    tokens ``<extra_id_0>`` ... ``<extra_id_{extra_ids-1}>``, mapped to the
    *end* of the id space (``<extra_id_0>`` has the highest id).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        legacy=True,
        **kwargs,
    ) -> None:
        """Load the SentencePiece model at *vocab_file* and register the
        sentinel tokens.

        Raises:
            ValueError: if *additional_special_tokens* is given but does not
                contain exactly *extra_ids* ``extra_id`` tokens.
        """
        # Add extra_ids to the special token list when the caller did not
        # supply their own list of additional special tokens.
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens.
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565"
            )

        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self._extra_ids = extra_ids

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        """Deprecation shim: warn when relying on the historical 512 default
        max length for the canonical T5 checkpoints."""
        if pretrained_model_name_or_path in lowerCamelCase.max_model_input_sizes:
            deprecated_max_model_length = lowerCamelCase.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length

    @property
    def vocab_size(self):
        # SentencePiece pieces plus the appended sentinel ids.
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions (the eos appended
        after each sequence), 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def get_sentinel_tokens(self):
        """Return the ``<extra_id_N>`` tokens present among the additional
        special tokens."""
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        """Return the ids of the sentinel tokens."""
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        """Append eos unless the sequence already ends with it (warns in that
        case instead of duplicating)."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """T5 does not use token type ids: return all zeros of the combined
        length (including the eos tokens)."""
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model input: ``X </s>`` or ``A </s> B </s>``."""
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def __getstate__(self):
        # The SentencePieceProcessor is not picklable; drop it and rebuild in
        # __setstate__ from self.vocab_file.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
        """Tokenize *text*; the non-legacy path normalizes the SentencePiece
        underline so tokens after special tokens are handled correctly."""
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        """Run SentencePiece on *text*, undoing the artificial leading
        underline added by :meth:`tokenize` in non-legacy mode."""
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]

        tokens = self.sp_model.encode(text, out_type=str)

        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            # Strip the underline we injected; drop the token if it was only
            # the underline itself.
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id; sentinels map to the top of the
        id range (``<extra_id_0>`` -> vocab_size - 1)."""
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Convert an id to a token (str); ids past the SentencePiece range
        are sentinels."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token

    def convert_tokens_to_string(self, tokens):
        """Decode a token sequence, routing runs of ordinary pieces through
        SentencePiece and splicing special tokens in verbatim."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or serialize) the SentencePiece model into *save_directory*
        and return the written path."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # No file on disk (e.g. loaded from serialized proto): dump it.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
47
"""Lazy import structure for the PEGASUS-X model."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Map of submodule name -> public symbols; consumed by _LazyModule below.
_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: simply skip registering the modeling symbols.
    pass
else:
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers; runtime stays lazy.
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
47
1
"""Lazy import structure for the BridgeTower model."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Map of submodule name -> public symbols; consumed by _LazyModule below.
_import_structure = {
    "configuration_bridgetower": [
        "BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BridgeTowerConfig",
        "BridgeTowerTextConfig",
        "BridgeTowerVisionConfig",
    ],
    "processing_bridgetower": ["BridgeTowerProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # vision deps missing: skip the image processor.
    pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: skip the modeling symbols.
    pass
else:
    _import_structure["modeling_bridgetower"] = [
        "BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BridgeTowerForContrastiveLearning",
        "BridgeTowerForImageAndTextRetrieval",
        "BridgeTowerForMaskedLM",
        "BridgeTowerModel",
        "BridgeTowerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers; runtime stays lazy.
    from .configuration_bridgetower import (
        BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BridgeTowerConfig,
        BridgeTowerTextConfig,
        BridgeTowerVisionConfig,
    )
    from .processing_bridgetower import BridgeTowerProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_bridgetower import BridgeTowerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bridgetower import (
            BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
            BridgeTowerForContrastiveLearning,
            BridgeTowerForImageAndTextRetrieval,
            BridgeTowerForMaskedLM,
            BridgeTowerModel,
            BridgeTowerPreTrainedModel,
        )

else:
    import sys

    # module_spec added for consistency with the other lazy __init__ modules.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
47
"""Image processor that resizes images down to a multiple of ``size_divisor``
and rescales pixel values by 1/255."""
from typing import List, Optional, Union

import numpy as np
import PIL.Image

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    PILImageResampling,
    get_image_size,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class lowerCamelCase(BaseImageProcessor):
    """Image processor whose pipeline is: to-numpy -> (optional) resize so
    both sides are multiples of ``size_divisor`` -> (optional) rescale to
    [0, 1] -> channel-dimension formatting -> ``BatchFeature``.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        """Store default preprocessing flags.

        Args:
            do_resize: whether to round image sides down to a multiple of
                ``size_divisor`` by default.
            size_divisor: the multiple both sides are rounded down to.
            resample: resampling filter used when resizing.
            do_rescale: whether to rescale pixel values by 1/255 by default.
        """
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize *image* so height and width are the closest multiples of
        *size_divisor* not larger than the original size."""
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        # `resize` here is the module-level transform, not this method.
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply *image* pixel values by *scale*."""
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        do_rescale: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Preprocess one image or a batch of images.

        Per-call arguments override the instance defaults set in ``__init__``.

        Raises:
            ValueError: if resizing is requested without a ``size_divisor``,
                or if *images* is not a valid image/batch of images.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
47
1
'''simple docstring''' import argparse import csv import logging import os import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from tqdm import tqdm, trange from transformers import ( CONFIG_NAME, WEIGHTS_NAME, AdamW, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, get_linear_schedule_with_warmup, ) logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO ) UpperCAmelCase : List[str] = logging.getLogger(__name__) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[int] = np.argmax(lowerCAmelCase_ , axis=1 ) return np.sum(outputs == labels ) def _a ( lowerCAmelCase_ ): """simple docstring""" with open(lowerCAmelCase_ , encoding='''utf_8''' ) as f: _snake_case : Dict = csv.reader(lowerCAmelCase_ ) _snake_case : Tuple = [] next(lowerCAmelCase_ ) # skip the first line for line in tqdm(lowerCAmelCase_ ): output.append((''' '''.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) ) return output def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Union[str, Any] = [] for dataset in encoded_datasets: _snake_case : List[Any] = len(lowerCAmelCase_ ) _snake_case : int = np.zeros((n_batch, 2, input_len) , dtype=np.intaa ) _snake_case : Optional[Any] = np.zeros((n_batch, 2) , dtype=np.intaa ) _snake_case : Optional[Any] = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.intaa ) _snake_case : int = np.zeros((n_batch,) , dtype=np.intaa ) for ( i, (story, conta, conta, mc_label), ) in enumerate(lowerCAmelCase_ ): _snake_case : Any = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] _snake_case : List[Any] = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] _snake_case : List[Any] = with_conta 
_snake_case : Dict = with_conta _snake_case : str = len(lowerCAmelCase_ ) - 1 _snake_case : List[Any] = len(lowerCAmelCase_ ) - 1 _snake_case : Optional[Any] = with_conta _snake_case : List[Any] = with_conta _snake_case : Optional[Any] = mc_label _snake_case : Optional[Any] = (input_ids, mc_token_ids, lm_labels, mc_labels) tensor_datasets.append(tuple(torch.tensor(lowerCAmelCase_ ) for t in all_inputs ) ) return tensor_datasets def _a ( ): """simple docstring""" _snake_case : List[Any] = argparse.ArgumentParser() parser.add_argument('''--model_name''' , type=lowerCAmelCase_ , default='''openai-gpt''' , help='''pretrained model name''' ) parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' ) parser.add_argument('''--do_eval''' , action='''store_true''' , help='''Whether to run eval on the dev set.''' ) parser.add_argument( '''--output_dir''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''The output directory where the model predictions and checkpoints will be written.''' , ) parser.add_argument('''--train_dataset''' , type=lowerCAmelCase_ , default='''''' ) parser.add_argument('''--eval_dataset''' , type=lowerCAmelCase_ , default='''''' ) parser.add_argument('''--seed''' , type=lowerCAmelCase_ , default=42 ) parser.add_argument('''--num_train_epochs''' , type=lowerCAmelCase_ , default=3 ) parser.add_argument('''--train_batch_size''' , type=lowerCAmelCase_ , default=8 ) parser.add_argument('''--eval_batch_size''' , type=lowerCAmelCase_ , default=16 ) parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=lowerCAmelCase_ , help='''Epsilon for Adam optimizer.''' ) parser.add_argument('''--max_grad_norm''' , type=lowerCAmelCase_ , default=1 ) parser.add_argument( '''--max_steps''' , default=-1 , type=lowerCAmelCase_ , help=( '''If > 0: set total number of training steps to perform. 
Override num_train_epochs.''' ) , ) parser.add_argument( '''--gradient_accumulation_steps''' , type=lowerCAmelCase_ , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , ) parser.add_argument('''--learning_rate''' , type=lowerCAmelCase_ , default=6.25E-5 ) parser.add_argument('''--warmup_steps''' , default=0 , type=lowerCAmelCase_ , help='''Linear warmup over warmup_steps.''' ) parser.add_argument('''--lr_schedule''' , type=lowerCAmelCase_ , default='''warmup_linear''' ) parser.add_argument('''--weight_decay''' , type=lowerCAmelCase_ , default=0.01 ) parser.add_argument('''--lm_coef''' , type=lowerCAmelCase_ , default=0.9 ) parser.add_argument('''--n_valid''' , type=lowerCAmelCase_ , default=374 ) parser.add_argument('''--server_ip''' , type=lowerCAmelCase_ , default='''''' , help='''Can be used for distant debugging.''' ) parser.add_argument('''--server_port''' , type=lowerCAmelCase_ , default='''''' , help='''Can be used for distant debugging.''' ) _snake_case : Optional[int] = parser.parse_args() print(lowerCAmelCase_ ) if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('''Waiting for debugger attach''' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=lowerCAmelCase_ ) ptvsd.wait_for_attach() random.seed(args.seed ) np.random.seed(args.seed ) torch.manual_seed(args.seed ) torch.cuda.manual_seed_all(args.seed ) _snake_case : Optional[Any] = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) _snake_case : Dict = torch.cuda.device_count() logger.info('''device: {}, n_gpu {}'''.format(lowerCAmelCase_ , lowerCAmelCase_ ) ) if not args.do_train and not args.do_eval: raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' ) if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) # Load tokenizer and model # This loading 
functions also add new tokens and embeddings called `special tokens` # These new embeddings will be fine-tuned on the RocStories dataset _snake_case : Optional[Any] = ['''_start_''', '''_delimiter_''', '''_classify_'''] _snake_case : Optional[int] = OpenAIGPTTokenizer.from_pretrained(args.model_name ) tokenizer.add_tokens(lowerCAmelCase_ ) _snake_case : Optional[Any] = tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) _snake_case : List[str] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name ) model.resize_token_embeddings(len(lowerCAmelCase_ ) ) model.to(lowerCAmelCase_ ) # Load and encode the datasets def tokenize_and_encode(lowerCAmelCase_ ): if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(lowerCAmelCase_ ) ) elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): return obj return [tokenize_and_encode(lowerCAmelCase_ ) for o in obj] logger.info('''Encoding dataset...''' ) _snake_case : Optional[Any] = load_rocstories_dataset(args.train_dataset ) _snake_case : List[Any] = load_rocstories_dataset(args.eval_dataset ) _snake_case : Dict = (train_dataset, eval_dataset) _snake_case : Optional[Any] = tokenize_and_encode(lowerCAmelCase_ ) # Compute the max input length for the Transformer _snake_case : Tuple = model.config.n_positions // 2 - 2 _snake_case : Optional[Any] = max( len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3 for dataset in encoded_datasets for story, conta, conta, _ in dataset ) _snake_case : Optional[int] = min(lowerCAmelCase_ , model.config.n_positions ) # Max size of input for the pre-trained model # Prepare inputs tensors and dataloaders _snake_case : int = pre_process_datasets(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , *lowerCAmelCase_ ) _snake_case , _snake_case : Dict = tensor_datasets[0], tensor_datasets[1] _snake_case : str = TensorDataset(*lowerCAmelCase_ ) _snake_case : List[Any] = RandomSampler(lowerCAmelCase_ ) _snake_case 
: List[Any] = DataLoader(lowerCAmelCase_ , sampler=lowerCAmelCase_ , batch_size=args.train_batch_size ) _snake_case : str = TensorDataset(*lowerCAmelCase_ ) _snake_case : Any = SequentialSampler(lowerCAmelCase_ ) _snake_case : str = DataLoader(lowerCAmelCase_ , sampler=lowerCAmelCase_ , batch_size=args.eval_batch_size ) # Prepare optimizer if args.do_train: if args.max_steps > 0: _snake_case : Optional[int] = args.max_steps _snake_case : Optional[int] = args.max_steps // (len(lowerCAmelCase_ ) // args.gradient_accumulation_steps) + 1 else: _snake_case : Dict = len(lowerCAmelCase_ ) // args.gradient_accumulation_steps * args.num_train_epochs _snake_case : Union[str, Any] = list(model.named_parameters() ) _snake_case : str = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight'''] _snake_case : Union[str, Any] = [ { '''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )], '''weight_decay''': args.weight_decay, }, {'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0}, ] _snake_case : Any = AdamW(lowerCAmelCase_ , lr=args.learning_rate , eps=args.adam_epsilon ) _snake_case : str = get_linear_schedule_with_warmup( lowerCAmelCase_ , num_warmup_steps=args.warmup_steps , num_training_steps=lowerCAmelCase_ ) if args.do_train: _snake_case , _snake_case , _snake_case : Optional[int] = 0, 0, None model.train() for _ in trange(int(args.num_train_epochs ) , desc='''Epoch''' ): _snake_case : str = 0 _snake_case : List[str] = 0 _snake_case : Tuple = tqdm(lowerCAmelCase_ , desc='''Training''' ) for step, batch in enumerate(lowerCAmelCase_ ): _snake_case : Any = tuple(t.to(lowerCAmelCase_ ) for t in batch ) _snake_case , _snake_case , _snake_case , _snake_case : Optional[Any] = batch _snake_case : Union[str, Any] = model(lowerCAmelCase_ , mc_token_ids=lowerCAmelCase_ , lm_labels=lowerCAmelCase_ , mc_labels=lowerCAmelCase_ ) _snake_case : List[Any] = args.lm_coef * losses[0] + losses[1] 
loss.backward() optimizer.step() scheduler.step() optimizer.zero_grad() tr_loss += loss.item() _snake_case : Optional[Any] = ( loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item() ) nb_tr_steps += 1 _snake_case : int = '''Training loss: {:.2e} lr: {:.2e}'''.format(lowerCAmelCase_ , scheduler.get_lr()[0] ) # Save a trained model if args.do_train: # Save a trained model, configuration and tokenizer _snake_case : Optional[int] = model.module if hasattr(lowerCAmelCase_ , '''module''' ) else model # Only save the model itself # If we save using the predefined names, we can load using `from_pretrained` _snake_case : Dict = os.path.join(args.output_dir , lowerCAmelCase_ ) _snake_case : Dict = os.path.join(args.output_dir , lowerCAmelCase_ ) torch.save(model_to_save.state_dict() , lowerCAmelCase_ ) model_to_save.config.to_json_file(lowerCAmelCase_ ) tokenizer.save_vocabulary(args.output_dir ) # Load a trained model and vocabulary that you have fine-tuned _snake_case : Dict = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir ) _snake_case : Optional[int] = OpenAIGPTTokenizer.from_pretrained(args.output_dir ) model.to(lowerCAmelCase_ ) if args.do_eval: model.eval() _snake_case , _snake_case : Tuple = 0, 0 _snake_case , _snake_case : str = 0, 0 for batch in tqdm(lowerCAmelCase_ , desc='''Evaluating''' ): _snake_case : Any = tuple(t.to(lowerCAmelCase_ ) for t in batch ) _snake_case , _snake_case , _snake_case , _snake_case : Dict = batch with torch.no_grad(): _snake_case , _snake_case , _snake_case , _snake_case : List[Any] = model( lowerCAmelCase_ , mc_token_ids=lowerCAmelCase_ , lm_labels=lowerCAmelCase_ , mc_labels=lowerCAmelCase_ ) _snake_case : int = mc_logits.detach().cpu().numpy() _snake_case : Any = mc_labels.to('''cpu''' ).numpy() _snake_case : Optional[Any] = accuracy(lowerCAmelCase_ , lowerCAmelCase_ ) eval_loss += mc_loss.mean().item() eval_accuracy += tmp_eval_accuracy nb_eval_examples += input_ids.size(0 ) nb_eval_steps 
+= 1 _snake_case : Optional[int] = eval_loss / nb_eval_steps _snake_case : Optional[int] = eval_accuracy / nb_eval_examples _snake_case : List[Any] = tr_loss / nb_tr_steps if args.do_train else None _snake_case : Tuple = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss} _snake_case : str = os.path.join(args.output_dir , '''eval_results.txt''' ) with open(lowerCAmelCase_ , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key in sorted(result.keys() ): logger.info(''' %s = %s''' , lowerCAmelCase_ , str(result[key] ) ) writer.write('''%s = %s\n''' % (key, str(result[key] )) ) if __name__ == "__main__": main()
47
'''Tests for the TensorFlow LED (Longformer-Encoder-Decoder) model.'''
# NOTE(review): this is an identifier-obfuscated copy of the transformers TFLED
# test module. Assignment targets were rewritten to `_snake_case`/`_lowercase`,
# but later references (e.g. `input_ids`, `outputs`, `TFLEDModelTester`) were
# not, so many names in this copy are unbound — restore the original
# identifiers before attempting to run this file.
from __future__ import annotations

import unittest

from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFLEDForConditionalGeneration, TFLEDModel


@require_tf
class lowerCamelCase :
    # Helper that builds a tiny LED config plus matching dummy inputs for the
    # common model tests below (originally `TFLEDModelTester`).
    _lowercase : Any = LEDConfig
    _lowercase : Any = {}
    _lowercase : Optional[Any] = """gelu"""

    def __init__( self , lowercase__ , lowercase__=13 , lowercase__=7 , lowercase__=True , lowercase__=False , lowercase__=99 , lowercase__=32 , lowercase__=2 , lowercase__=4 , lowercase__=37 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=20 , lowercase__=2 , lowercase__=1 , lowercase__=0 , lowercase__=4 , ) -> Any:
        """Store the (tiny) model hyper-parameters used by every test case."""
        _snake_case : Dict = parent
        _snake_case : Any = batch_size
        _snake_case : List[str] = seq_length
        _snake_case : Union[str, Any] = is_training
        _snake_case : Tuple = use_labels
        _snake_case : int = vocab_size
        _snake_case : str = hidden_size
        _snake_case : Optional[Any] = num_hidden_layers
        _snake_case : List[Any] = num_attention_heads
        _snake_case : Optional[int] = intermediate_size
        _snake_case : List[Any] = hidden_dropout_prob
        _snake_case : List[str] = attention_probs_dropout_prob
        _snake_case : Optional[int] = max_position_embeddings
        _snake_case : Any = eos_token_id
        _snake_case : List[Any] = pad_token_id
        _snake_case : Optional[int] = bos_token_id
        _snake_case : Any = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        _snake_case : Any = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        _snake_case : Tuple = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )

    def UpperCAmelCase_ ( self ) -> Optional[int]:
        """Create a config and an inputs dict (with global attention on the last token)."""
        _snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        _snake_case : Tuple = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        # force every sequence to end with the eos token
        _snake_case : Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1 )
        _snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _snake_case : List[Any] = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
        _snake_case : Dict = prepare_led_inputs_dict(lowercase__ , lowercase__ , lowercase__ )
        # global attention only on the final position of each sequence
        _snake_case : Dict = tf.concat(
            [tf.zeros_like(lowercase__ )[:, :-1], tf.ones_like(lowercase__ )[:, -1:]] , axis=-1 , )
        _snake_case : Dict = global_attention_mask
        return config, inputs_dict

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> int:
        """Check that cached decoding (past_key_values) matches a full forward pass."""
        _snake_case : int = TFLEDModel(config=lowercase__ ).get_decoder()
        _snake_case : Union[str, Any] = inputs_dict['''input_ids''']

        _snake_case : List[str] = input_ids[:1, :]
        _snake_case : Tuple = inputs_dict['''attention_mask'''][:1, :]
        _snake_case : Dict = 1

        # first forward pass
        _snake_case : Optional[int] = model(lowercase__ , attention_mask=lowercase__ , use_cache=lowercase__ )

        _snake_case , _snake_case : Dict = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        _snake_case : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size )
        _snake_case : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )

        # append to next input_ids and
        _snake_case : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 )
        _snake_case : List[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )

        _snake_case : List[Any] = model(lowercase__ , attention_mask=lowercase__ )[0]
        _snake_case : Tuple = model(lowercase__ , attention_mask=lowercase__ , past_key_values=lowercase__ )[0]

        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )

        # select random slice
        _snake_case : Tuple = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        _snake_case : int = output_from_no_past[:, -3:, random_slice_idx]
        _snake_case : Optional[Any] = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(lowercase__ , lowercase__ , rtol=1E-3 )


def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , ):
    """Fill in default attention/head masks for LED inputs (originally `prepare_led_inputs_dict`).

    NOTE(review): the obfuscation collapsed all seven parameter names into one,
    which is a SyntaxError; the bodies still reference the original names
    (config, input_ids, attention_mask, ...).
    """
    if attention_mask is None:
        # mask out pad tokens
        _snake_case : Union[str, Any] = tf.cast(tf.math.not_equal(lowerCAmelCase_ , config.pad_token_id ) , tf.inta )
    if decoder_attention_mask is None:
        # never mask the decoder start token; mask pads elsewhere
        _snake_case : str = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
            ] , axis=-1 , )
    if head_mask is None:
        _snake_case : int = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        _snake_case : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }


@require_tf
class lowerCamelCase (a__ , a__ , unittest.TestCase ):
    # Common-test suite for the TF LED models (originally `TFLEDModelTest`,
    # mixing in TFModelTesterMixin and PipelineTesterMixin).
    _lowercase : Optional[int] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    _lowercase : int = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    _lowercase : Dict = (
        {
            """conversational""": TFLEDForConditionalGeneration,
            """feature-extraction""": TFLEDModel,
            """summarization""": TFLEDForConditionalGeneration,
            """text2text-generation""": TFLEDForConditionalGeneration,
            """translation""": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    _lowercase : int = True
    _lowercase : List[Any] = False
    _lowercase : str = False
    _lowercase : Union[str, Any] = False

    def UpperCAmelCase_ ( self ) -> Optional[Any]:
        """Set up the model tester and config tester."""
        _snake_case : str = TFLEDModelTester(self )
        _snake_case : Union[str, Any] = ConfigTester(self , config_class=lowercase__ )

    def UpperCAmelCase_ ( self ) -> Tuple:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def UpperCAmelCase_ ( self ) -> List[str]:
        """Exercise cached decoding against a full forward pass."""
        _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*lowercase__ )

    def UpperCAmelCase_ ( self ) -> int:
        """Verify attention-output shapes, including LED's global attentions."""
        _snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        _snake_case : Any = tf.zeros_like(inputs_dict['''attention_mask'''] )
        _snake_case : Optional[Any] = 2
        # mark the first `num_global_attn_indices` tokens as global-attention tokens
        _snake_case : Any = tf.where(
            tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , )
        _snake_case : Dict = True
        _snake_case : str = self.model_tester.seq_length
        _snake_case : Dict = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(lowercase__ ):
            # one attention tensor per layer, shaped [heads, tgt_len, src_len]
            _snake_case : Optional[int] = outputs.decoder_attentions
            self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )

        def check_encoder_attentions_output(lowercase__ ):
            # LED additionally returns `encoder_global_attentions`
            _snake_case : int = [t.numpy() for t in outputs.encoder_attentions]
            _snake_case : Tuple = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
            self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )

        for model_class in self.all_model_classes:
            _snake_case : Union[str, Any] = True
            _snake_case : Dict = False
            _snake_case : Union[str, Any] = False
            _snake_case : List[Any] = model_class(lowercase__ )
            _snake_case : Optional[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
            _snake_case : List[Any] = len(lowercase__ )
            self.assertEqual(config.output_hidden_states , lowercase__ )
            check_encoder_attentions_output(lowercase__ )

            if self.is_encoder_decoder:
                _snake_case : Union[str, Any] = model_class(lowercase__ )
                _snake_case : List[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
                self.assertEqual(config.output_hidden_states , lowercase__ )
                check_decoder_attentions_output(lowercase__ )

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            _snake_case : str = True
            _snake_case : Tuple = model_class(lowercase__ )
            _snake_case : int = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
            self.assertEqual(config.output_hidden_states , lowercase__ )
            check_encoder_attentions_output(lowercase__ )

            # Check attention is always last and order is fine
            _snake_case : int = True
            _snake_case : List[str] = True
            _snake_case : Tuple = model_class(lowercase__ )
            _snake_case : Optional[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase__ ) )
            self.assertEqual(model.config.output_hidden_states , lowercase__ )
            check_encoder_attentions_output(lowercase__ )

    @unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' )
    def UpperCAmelCase_ ( self ) -> int:
        """Skipped: tracing is incompatible with LED's symbolic-tensor conditionals."""
        pass

    def UpperCAmelCase_ ( self ) -> str:
        """Intentionally a no-op override of an inherited common test."""
        pass


def _a ( lowerCAmelCase_ ):
    """Build an int tensor from a nested list (originally `_long_tensor`)."""
    return tf.constant(lowerCAmelCase_ , dtype=tf.intaa )


# default numeric tolerance for the integration tests below
UpperCAmelCase : Dict = 1E-4


@slow
@require_tf
class lowerCamelCase (unittest.TestCase ):
    # Integration tests against the pretrained `allenai/led-base-16384` checkpoint.
    def UpperCAmelCase_ ( self ) -> Dict:
        """Check the base model's hidden-state output against frozen reference values."""
        _snake_case : List[str] = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led
        # change to intended input here
        _snake_case : List[str] = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        _snake_case : Tuple = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        _snake_case : Tuple = prepare_led_inputs_dict(model.config , lowercase__ , lowercase__ )
        _snake_case : int = model(**lowercase__ )[0]
        _snake_case : Dict = (1, 1_024, 768)
        self.assertEqual(output.shape , lowercase__ )
        # change to expected output here
        _snake_case : List[Any] = tf.convert_to_tensor(
            [[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , )
        tf.debugging.assert_near(output[:, :3, :3] , lowercase__ , atol=1E-3 )

    def UpperCAmelCase_ ( self ) -> List[Any]:
        """Check the LM head's logits against frozen reference values."""
        _snake_case : Any = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' )
        # change to intended input here
        _snake_case : Dict = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        _snake_case : Dict = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        _snake_case : List[str] = prepare_led_inputs_dict(model.config , lowercase__ , lowercase__ )
        _snake_case : Tuple = model(**lowercase__ )[0]
        _snake_case : Any = (1, 1_024, model.config.vocab_size)
        self.assertEqual(output.shape , lowercase__ )
        # change to expected output here
        _snake_case : Dict = tf.convert_to_tensor(
            [[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , )
        tf.debugging.assert_near(output[:, :3, :3] , lowercase__ , atol=1E-3 , rtol=1E-3 )
47
1
'''simple docstring''' def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" if len(lowerCAmelCase_ ) != len(lowerCAmelCase_ ): raise ValueError('''String lengths must match!''' ) _snake_case : List[str] = 0 for chara, chara in zip(lowerCAmelCase_ , lowerCAmelCase_ ): if chara != chara: count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
47
'''Fast (Rust-backed) tokenizer for GPT-NeoX-20B.'''
# NOTE(review): this is an identifier-obfuscated copy of transformers'
# GPT-NeoX fast tokenizer module. Several references below (VOCAB_FILES_NAMES,
# pre_tok_state, add_prefix_space, conversation, input_ids, ...) point at names
# whose defining assignments were rewritten to `UpperCAmelCase`/`_snake_case`,
# so they are unbound in this copy — restore the originals before running.
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation


UpperCAmelCase : Optional[int] = logging.get_logger(__name__)

# Canonical on-disk file names for serialized tokenizer assets.
UpperCAmelCase : List[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

# Download URL for the pretrained tokenizer file, keyed by checkpoint name.
UpperCAmelCase : Any = {
    'tokenizer_file': {
        'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
    },
}

# Maximum model input length per checkpoint.
UpperCAmelCase : Optional[Any] = {
    'gpt-neox-20b': 2_0_4_8,
}


class lowerCamelCase (a__ ):
    # Fast tokenizer class (originally `GPTNeoXTokenizerFast`, base class
    # `PreTrainedTokenizerFast` — obfuscated to `a__` here).
    _lowercase : Optional[int] = VOCAB_FILES_NAMES
    _lowercase : str = PRETRAINED_VOCAB_FILES_MAP
    _lowercase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowercase : Optional[int] = ["""input_ids""", """attention_mask"""]

    def __init__( self , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__="<|endoftext|>" , lowercase__="<|endoftext|>" , lowercase__="<|endoftext|>" , lowercase__=False , **lowercase__ , ) -> List[Any]:
        """Initialize the fast tokenizer and reconcile `add_prefix_space` with the backend pre-tokenizer."""
        super().__init__(
            lowercase__ , lowercase__ , tokenizer_file=lowercase__ , unk_token=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , add_prefix_space=lowercase__ , **lowercase__ , )

        # Inspect the serialized state of the Rust pre-tokenizer; if its
        # add_prefix_space setting differs from the requested one, rebuild it.
        _snake_case : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , lowercase__ ) != add_prefix_space:
            # look up the concrete pre-tokenizer class by its serialized "type"
            _snake_case : int = getattr(lowercase__ , pre_tok_state.pop('''type''' ) )
            _snake_case : int = add_prefix_space
            _snake_case : Optional[Any] = pre_tok_class(**lowercase__ )

        _snake_case : List[str] = add_prefix_space

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ = None ) -> Tuple[str]:
        """Save the backend tokenizer model files into a directory and return their paths."""
        _snake_case : Optional[int] = self._tokenizer.model.save(lowercase__ , name=lowercase__ )
        return tuple(lowercase__ )

    def UpperCAmelCase_ ( self , lowercase__ ) -> List[int]:
        """Flatten a `Conversation` into input ids: encode each turn, append eos
        after every turn, and keep only the trailing `model_max_length` tokens."""
        _snake_case : List[str] = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(lowercase__ , add_special_tokens=lowercase__ ) + [self.eos_token_id] )

        if len(lowercase__ ) > self.model_max_length:
            # keep the most recent context when over the model limit
            _snake_case : Dict = input_ids[-self.model_max_length :]
        return input_ids
47
1
'''Auto-model classes and model-type mappings for the Flax backend.'''
# NOTE(review): identifier-obfuscated copy of transformers'
# `models/auto/modeling_flax_auto.py`. Every assignment target was rewritten to
# `UpperCAmelCase`/`_lowercase`, but the names *used* later
# (FLAX_MODEL_MAPPING_NAMES, FLAX_MODEL_MAPPING, FlaxAutoModel, ...) were not,
# so they are unbound in this copy — restore the originals before running.
from collections import OrderedDict

from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES


UpperCAmelCase : Dict = logging.get_logger(__name__)

# model-type -> Flax class-name tables; consumed lazily by _LazyAutoMapping
UpperCAmelCase : Tuple = OrderedDict(
    [
        # Base model mapping
        ('albert', 'FlaxAlbertModel'),
        ('bart', 'FlaxBartModel'),
        ('beit', 'FlaxBeitModel'),
        ('bert', 'FlaxBertModel'),
        ('big_bird', 'FlaxBigBirdModel'),
        ('blenderbot', 'FlaxBlenderbotModel'),
        ('blenderbot-small', 'FlaxBlenderbotSmallModel'),
        ('clip', 'FlaxCLIPModel'),
        ('distilbert', 'FlaxDistilBertModel'),
        ('electra', 'FlaxElectraModel'),
        ('gpt-sw3', 'FlaxGPT2Model'),
        ('gpt2', 'FlaxGPT2Model'),
        ('gpt_neo', 'FlaxGPTNeoModel'),
        ('gptj', 'FlaxGPTJModel'),
        ('longt5', 'FlaxLongT5Model'),
        ('marian', 'FlaxMarianModel'),
        ('mbart', 'FlaxMBartModel'),
        ('mt5', 'FlaxMT5Model'),
        ('opt', 'FlaxOPTModel'),
        ('pegasus', 'FlaxPegasusModel'),
        ('regnet', 'FlaxRegNetModel'),
        ('resnet', 'FlaxResNetModel'),
        ('roberta', 'FlaxRobertaModel'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
        ('roformer', 'FlaxRoFormerModel'),
        ('t5', 'FlaxT5Model'),
        ('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
        ('vit', 'FlaxViTModel'),
        ('wav2vec2', 'FlaxWav2Vec2Model'),
        ('whisper', 'FlaxWhisperModel'),
        ('xglm', 'FlaxXGLMModel'),
        ('xlm-roberta', 'FlaxXLMRobertaModel'),
    ]
)

UpperCAmelCase : str = OrderedDict(
    [
        # Model for pre-training mapping
        ('albert', 'FlaxAlbertForPreTraining'),
        ('bart', 'FlaxBartForConditionalGeneration'),
        ('bert', 'FlaxBertForPreTraining'),
        ('big_bird', 'FlaxBigBirdForPreTraining'),
        ('electra', 'FlaxElectraForPreTraining'),
        ('longt5', 'FlaxLongT5ForConditionalGeneration'),
        ('mbart', 'FlaxMBartForConditionalGeneration'),
        ('mt5', 'FlaxMT5ForConditionalGeneration'),
        ('roberta', 'FlaxRobertaForMaskedLM'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
        ('roformer', 'FlaxRoFormerForMaskedLM'),
        ('t5', 'FlaxT5ForConditionalGeneration'),
        ('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
        ('whisper', 'FlaxWhisperForConditionalGeneration'),
        ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
    ]
)

UpperCAmelCase : List[Any] = OrderedDict(
    [
        # Model for Masked LM mapping
        ('albert', 'FlaxAlbertForMaskedLM'),
        ('bart', 'FlaxBartForConditionalGeneration'),
        ('bert', 'FlaxBertForMaskedLM'),
        ('big_bird', 'FlaxBigBirdForMaskedLM'),
        ('distilbert', 'FlaxDistilBertForMaskedLM'),
        ('electra', 'FlaxElectraForMaskedLM'),
        ('mbart', 'FlaxMBartForConditionalGeneration'),
        ('roberta', 'FlaxRobertaForMaskedLM'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
        ('roformer', 'FlaxRoFormerForMaskedLM'),
        ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
    ]
)

UpperCAmelCase : Optional[int] = OrderedDict(
    [
        # Model for Seq2Seq Causal LM mapping
        ('bart', 'FlaxBartForConditionalGeneration'),
        ('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
        ('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
        ('encoder-decoder', 'FlaxEncoderDecoderModel'),
        ('longt5', 'FlaxLongT5ForConditionalGeneration'),
        ('marian', 'FlaxMarianMTModel'),
        ('mbart', 'FlaxMBartForConditionalGeneration'),
        ('mt5', 'FlaxMT5ForConditionalGeneration'),
        ('pegasus', 'FlaxPegasusForConditionalGeneration'),
        ('t5', 'FlaxT5ForConditionalGeneration'),
    ]
)

UpperCAmelCase : List[Any] = OrderedDict(
    [
        # Model for Image-classsification
        ('beit', 'FlaxBeitForImageClassification'),
        ('regnet', 'FlaxRegNetForImageClassification'),
        ('resnet', 'FlaxResNetForImageClassification'),
        ('vit', 'FlaxViTForImageClassification'),
    ]
)

UpperCAmelCase : Optional[Any] = OrderedDict(
    [
        ('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
    ]
)

UpperCAmelCase : Any = OrderedDict(
    [
        # Model for Causal LM mapping
        ('bart', 'FlaxBartForCausalLM'),
        ('bert', 'FlaxBertForCausalLM'),
        ('big_bird', 'FlaxBigBirdForCausalLM'),
        ('electra', 'FlaxElectraForCausalLM'),
        ('gpt-sw3', 'FlaxGPT2LMHeadModel'),
        ('gpt2', 'FlaxGPT2LMHeadModel'),
        ('gpt_neo', 'FlaxGPTNeoForCausalLM'),
        ('gptj', 'FlaxGPTJForCausalLM'),
        ('opt', 'FlaxOPTForCausalLM'),
        ('roberta', 'FlaxRobertaForCausalLM'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
        ('xglm', 'FlaxXGLMForCausalLM'),
        ('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
    ]
)

UpperCAmelCase : List[str] = OrderedDict(
    [
        # Model for Sequence Classification mapping
        ('albert', 'FlaxAlbertForSequenceClassification'),
        ('bart', 'FlaxBartForSequenceClassification'),
        ('bert', 'FlaxBertForSequenceClassification'),
        ('big_bird', 'FlaxBigBirdForSequenceClassification'),
        ('distilbert', 'FlaxDistilBertForSequenceClassification'),
        ('electra', 'FlaxElectraForSequenceClassification'),
        ('mbart', 'FlaxMBartForSequenceClassification'),
        ('roberta', 'FlaxRobertaForSequenceClassification'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
        ('roformer', 'FlaxRoFormerForSequenceClassification'),
        ('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
    ]
)

UpperCAmelCase : Tuple = OrderedDict(
    [
        # Model for Question Answering mapping
        ('albert', 'FlaxAlbertForQuestionAnswering'),
        ('bart', 'FlaxBartForQuestionAnswering'),
        ('bert', 'FlaxBertForQuestionAnswering'),
        ('big_bird', 'FlaxBigBirdForQuestionAnswering'),
        ('distilbert', 'FlaxDistilBertForQuestionAnswering'),
        ('electra', 'FlaxElectraForQuestionAnswering'),
        ('mbart', 'FlaxMBartForQuestionAnswering'),
        ('roberta', 'FlaxRobertaForQuestionAnswering'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
        ('roformer', 'FlaxRoFormerForQuestionAnswering'),
        ('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
    ]
)

UpperCAmelCase : List[str] = OrderedDict(
    [
        # Model for Token Classification mapping
        ('albert', 'FlaxAlbertForTokenClassification'),
        ('bert', 'FlaxBertForTokenClassification'),
        ('big_bird', 'FlaxBigBirdForTokenClassification'),
        ('distilbert', 'FlaxDistilBertForTokenClassification'),
        ('electra', 'FlaxElectraForTokenClassification'),
        ('roberta', 'FlaxRobertaForTokenClassification'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
        ('roformer', 'FlaxRoFormerForTokenClassification'),
        ('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
    ]
)

UpperCAmelCase : List[str] = OrderedDict(
    [
        # Model for Multiple Choice mapping
        ('albert', 'FlaxAlbertForMultipleChoice'),
        ('bert', 'FlaxBertForMultipleChoice'),
        ('big_bird', 'FlaxBigBirdForMultipleChoice'),
        ('distilbert', 'FlaxDistilBertForMultipleChoice'),
        ('electra', 'FlaxElectraForMultipleChoice'),
        ('roberta', 'FlaxRobertaForMultipleChoice'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
        ('roformer', 'FlaxRoFormerForMultipleChoice'),
        ('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
    ]
)

UpperCAmelCase : Any = OrderedDict(
    [
        ('bert', 'FlaxBertForNextSentencePrediction'),
    ]
)

UpperCAmelCase : str = OrderedDict(
    [
        ('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
        ('whisper', 'FlaxWhisperForConditionalGeneration'),
    ]
)

UpperCAmelCase : Dict = OrderedDict(
    [
        ('whisper', 'FlaxWhisperForAudioClassification'),
    ]
)

# Lazy config-class -> model-class mappings built from the tables above.
UpperCAmelCase : Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
UpperCAmelCase : Any = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
UpperCAmelCase : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
UpperCAmelCase : Dict = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
UpperCAmelCase : Union[str, Any] = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase : Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
UpperCAmelCase : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
UpperCAmelCase : Union[str, Any] = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase : Dict = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
UpperCAmelCase : int = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase : List[Any] = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
UpperCAmelCase : Optional[Any] = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
UpperCAmelCase : Tuple = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
UpperCAmelCase : List[Any] = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)


# Each auto class below binds one of the lazy mappings; `auto_class_update`
# then fills in the user-facing docstring/from_pretrained helpers.
class lowerCamelCase (_BaseAutoModelClass ):
    _lowercase : Optional[Any] = FLAX_MODEL_MAPPING


UpperCAmelCase : List[str] = auto_class_update(FlaxAutoModel)


class lowerCamelCase (_BaseAutoModelClass ):
    _lowercase : List[Any] = FLAX_MODEL_FOR_PRETRAINING_MAPPING


UpperCAmelCase : str = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')


class lowerCamelCase (_BaseAutoModelClass ):
    _lowercase : Tuple = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


UpperCAmelCase : List[Any] = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')


class lowerCamelCase (_BaseAutoModelClass ):
    _lowercase : Dict = FLAX_MODEL_FOR_MASKED_LM_MAPPING


UpperCAmelCase : Optional[int] = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')


class lowerCamelCase (_BaseAutoModelClass ):
    _lowercase : List[Any] = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


UpperCAmelCase : List[Any] = auto_class_update(
    FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)


class lowerCamelCase (_BaseAutoModelClass ):
    _lowercase : str = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


UpperCAmelCase : Optional[int] = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)


class lowerCamelCase (_BaseAutoModelClass ):
    _lowercase : Dict = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


UpperCAmelCase : int = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')


class lowerCamelCase (_BaseAutoModelClass ):
    _lowercase : Any = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


UpperCAmelCase : Union[str, Any] = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc='token classification'
)


class lowerCamelCase (_BaseAutoModelClass ):
    _lowercase : int = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


UpperCAmelCase : List[str] = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')


class lowerCamelCase (_BaseAutoModelClass ):
    _lowercase : Optional[int] = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


UpperCAmelCase : Optional[int] = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)


class lowerCamelCase (_BaseAutoModelClass ):
    _lowercase : Dict = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


UpperCAmelCase : List[str] = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc='image classification'
)


class lowerCamelCase (_BaseAutoModelClass ):
    _lowercase : Tuple = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


UpperCAmelCase : Optional[Any] = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling')


class lowerCamelCase (_BaseAutoModelClass ):
    _lowercase : Any = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


UpperCAmelCase : Optional[int] = auto_class_update(
    FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling'
)
'''simple docstring'''
import math

from numpy import inf
from scipy.integrate import quad

__all__ = ["_a", "_integrand"]  # export the underscored entry points for star-imports/tests


def _a(num):
    """Compute the gamma function Gamma(num) by numerical integration.

    Gamma(z) = integral over [0, inf) of x**(z - 1) * e**(-x) dx.

    Args:
        num: Point at which to evaluate; must be > 0 for the integral to converge.

    Returns:
        Gamma(num) as a float.

    Raises:
        ValueError: If num <= 0.
    """
    if num <= 0:
        raise ValueError('''math domain error''')
    # Bug fixes vs. the original:
    #  * the integrand (not this function itself) is passed to `quad`;
    #  * the upper limit is `inf` (previously imported but unused), not `num`;
    #  * `args` is a proper one-element tuple.
    return quad(_integrand, 0, inf, args=(num,))[0]


def _integrand(x, z):
    """Integrand of the gamma function: x**(z - 1) * e**(-x).

    (The original defined this as a second `_a` whose two parameters shared one
    name — a SyntaxError — and which shadowed the gamma function above.)
    """
    return math.pow(x, z - 1) * math.exp(-x)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
47
1
'''simple docstring'''
# MBart tokenizer test suite (slow SentencePiece tokenizer + fast Rust tokenizer).
#
# NOTE(review): this file has been mechanically renamed — `a__`, `lowercase__`,
# `tokenizer`, `tokenizer_r`, `tokenizer_p`, `tokenizer_r_files`, `src_text`,
# `ids`, `generated_ids`, `batch`, `targets`, `new_tok`, `EN_CODE`, `RO_CODE`
# are referenced below but never bound in this block; the rewrite appears to
# have dropped the original bindings. Confirm against the upstream test file
# before relying on any of it at runtime. Code is left byte-identical here.
import shutil
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
)

from ...test_tokenization_common import TokenizerTesterMixin

# Path to the small SentencePiece model fixture used by the unit tests.
UpperCAmelCase : Dict = get_tests_dir('fixtures/test_sentencepiece.model')

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

# Language-code token ids in the MBart vocabulary (en_XX and ro_RO).
UpperCAmelCase : List[Any] = 2_5_0_0_0_4
UpperCAmelCase : Tuple = 2_5_0_0_2_0


@require_sentencepiece
@require_tokenizers
class lowerCamelCase (a__ , unittest.TestCase ):
    # Tokenizer classes under test plus mixin feature flags (slow/fast enabled).
    _lowercase : str = MBartTokenizer
    _lowercase : List[Any] = MBartTokenizerFast
    _lowercase : int = True
    _lowercase : int = True

    def UpperCAmelCase_ ( self ) -> Any:
        """simple docstring"""
        # Build a slow tokenizer from the SentencePiece fixture and save it so
        # the mixin's tests can reload it from `self.tmpdirname`.
        super().setUp()
        # We have a SentencePiece fixture for testing
        _snake_case : str = MBartTokenizer(lowercase__ , keep_accents=lowercase__ )
        tokenizer.save_pretrained(self.tmpdirname )

    def UpperCAmelCase_ ( self ) -> List[str]:
        """simple docstring"""
        # Round-trip check: tokenize -> ids -> tokens on two sample sentences,
        # including digits/accents that fall back to <unk> on the ids->tokens leg.
        _snake_case : Tuple = MBartTokenizer(lowercase__ , keep_accents=lowercase__ )
        _snake_case : Dict = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(lowercase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(lowercase__ ) ,
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,
        )
        _snake_case : List[str] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            lowercase__ ,
            [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''9''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''é''',
                '''.''',
            ] ,
        )
        _snake_case : str = tokenizer.convert_tokens_to_ids(lowercase__ )
        self.assertListEqual(
            lowercase__ ,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
            ] ,
        )
        _snake_case : Tuple = tokenizer.convert_ids_to_tokens(lowercase__ )
        self.assertListEqual(
            lowercase__ ,
            [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''<unk>''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''<unk>''',
                '''.''',
            ] ,
        )

    def UpperCAmelCase_ ( self ) -> Any:
        """simple docstring"""
        # Save/reload parity between the Rust and Python tokenizers, in the
        # default, legacy_format=True, and legacy_format=False layouts.
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        _snake_case : Any = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                _snake_case : Tuple = self.rust_tokenizer_class.from_pretrained(lowercase__ , **lowercase__ )
                _snake_case : List[Any] = self.tokenizer_class.from_pretrained(lowercase__ , **lowercase__ )
                _snake_case : Dict = tempfile.mkdtemp()
                _snake_case : int = tokenizer_r.save_pretrained(lowercase__ )
                _snake_case : Dict = tokenizer_p.save_pretrained(lowercase__ )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                _snake_case : Any = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
                self.assertSequenceEqual(lowercase__ , lowercase__ )
                # Checks everything loads correctly in the same way
                _snake_case : Tuple = tokenizer_r.from_pretrained(lowercase__ )
                _snake_case : List[str] = tokenizer_p.from_pretrained(lowercase__ )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowercase__ , lowercase__ ) )
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(lowercase__ )
                # Save tokenizer rust, legacy_format=True
                _snake_case : Tuple = tempfile.mkdtemp()
                _snake_case : List[str] = tokenizer_r.save_pretrained(lowercase__ , legacy_format=lowercase__ )
                _snake_case : Any = tokenizer_p.save_pretrained(lowercase__ )
                # Checks it save with the same files
                self.assertSequenceEqual(lowercase__ , lowercase__ )
                # Checks everything loads correctly in the same way
                _snake_case : Union[str, Any] = tokenizer_r.from_pretrained(lowercase__ )
                _snake_case : Dict = tokenizer_p.from_pretrained(lowercase__ )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowercase__ , lowercase__ ) )
                shutil.rmtree(lowercase__ )
                # Save tokenizer rust, legacy_format=False
                _snake_case : List[str] = tempfile.mkdtemp()
                _snake_case : List[Any] = tokenizer_r.save_pretrained(lowercase__ , legacy_format=lowercase__ )
                _snake_case : str = tokenizer_p.save_pretrained(lowercase__ )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                _snake_case : str = tokenizer_r.from_pretrained(lowercase__ )
                _snake_case : Tuple = tokenizer_p.from_pretrained(lowercase__ )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowercase__ , lowercase__ ) )
                shutil.rmtree(lowercase__ )


@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase (unittest.TestCase ):
    # Integration tests against the pretrained facebook/mbart-large-en-ro
    # checkpoint: English source sentences, Romanian targets, and the token
    # ids the first source sentence is expected to encode to.
    _lowercase : Any = """facebook/mbart-large-en-ro"""
    _lowercase : str = [
        """ UN Chief Says There Is No Military Solution in Syria""",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    _lowercase : Union[str, Any] = [
        """Şeful ONU declară că nu există o soluţie militară în Siria""",
        """Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
        """ pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
        """ face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
    ]
    _lowercase : Any = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE]

    @classmethod
    def UpperCAmelCase_ ( cls ) -> Dict:
        """simple docstring"""
        # One shared tokenizer for the whole class (en_XX -> ro_RO direction).
        _snake_case : MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
        _snake_case : int = 1
        return cls

    def UpperCAmelCase_ ( self ) -> Dict:
        """simple docstring"""
        # Language-code tokens map to their fixed fairseq vocabulary ids.
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 250_001 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 250_004 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 250_020 )

    def UpperCAmelCase_ ( self ) -> str:
        """simple docstring"""
        # First source sentence encodes exactly to the expected id sequence.
        _snake_case : Dict = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , lowercase__ )

    def UpperCAmelCase_ ( self ) -> List[str]:
        """simple docstring"""
        # Decoding with skip_special_tokens drops the leading language code and
        # the EOS token; with and without that leading code the text matches.
        self.assertIn(lowercase__ , self.tokenizer.all_special_ids )
        _snake_case : Dict = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
        _snake_case : str = self.tokenizer.decode(lowercase__ , skip_special_tokens=lowercase__ )
        _snake_case : Tuple = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowercase__ )
        self.assertEqual(lowercase__ , lowercase__ )
        self.assertNotIn(self.tokenizer.eos_token , lowercase__ )

    def UpperCAmelCase_ ( self ) -> Union[str, Any]:
        """simple docstring"""
        # Truncation to max_length keeps EOS (id 2) second-to-last and the
        # language code last, with the overall length equal to max_length.
        _snake_case : Tuple = ['''this is gunna be a long sentence ''' * 20]
        assert isinstance(src_text[0] , lowercase__ )
        _snake_case : str = 10
        _snake_case : Dict = self.tokenizer(lowercase__ , max_length=lowercase__ , truncation=lowercase__ ).input_ids[0]
        self.assertEqual(ids[-2] , 2 )
        self.assertEqual(ids[-1] , lowercase__ )
        self.assertEqual(len(lowercase__ ) , lowercase__ )

    def UpperCAmelCase_ ( self ) -> Any:
        """simple docstring"""
        # <mask> and ar_AR sit at their fixed ids in the special-token region.
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [250_026, 250_001] )

    def UpperCAmelCase_ ( self ) -> List[Any]:
        """simple docstring"""
        # Saving and reloading preserves the fairseq token-id mapping.
        _snake_case : Tuple = tempfile.mkdtemp()
        _snake_case : Dict = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(lowercase__ )
        _snake_case : Tuple = MBartTokenizer.from_pretrained(lowercase__ )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowercase__ )

    @require_torch
    def UpperCAmelCase_ ( self ) -> int:
        """simple docstring"""
        # Batch layout matches fairseq: source ends [EOS, en_XX], decoder input
        # starts with ro_RO, labels end [EOS, ro_RO].
        _snake_case : Optional[Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowercase__ , return_tensors='''pt''' )
        _snake_case : Union[str, Any] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def UpperCAmelCase_ ( self ) -> str:
        """simple docstring"""
        # Full encode with truncation to the expected source length; also checks
        # prefix/suffix special tokens are reset after the call.
        _snake_case : Tuple = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=lowercase__ , truncation=lowercase__ ,
            max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
        _snake_case : Optional[int] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
        self.assertIsInstance(lowercase__ , lowercase__ )
        self.assertEqual((2, 14) , batch.input_ids.shape )
        self.assertEqual((2, 14) , batch.attention_mask.shape )
        _snake_case : List[Any] = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , lowercase__ )
        self.assertEqual(2 , batch.decoder_input_ids[0, -1] )  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [] )
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )

    def UpperCAmelCase_ ( self ) -> Union[str, Any]:
        """simple docstring"""
        # Source and target sides can be truncated to different max lengths.
        _snake_case : Dict = self.tokenizer(self.src_text , padding=lowercase__ , truncation=lowercase__ , max_length=3 , return_tensors='''pt''' )
        _snake_case : Tuple = self.tokenizer(
            text_target=self.tgt_text , padding=lowercase__ , truncation=lowercase__ , max_length=10 , return_tensors='''pt''' )
        _snake_case : Any = targets['''input_ids''']
        _snake_case : Dict = shift_tokens_right(lowercase__ , self.tokenizer.pad_token_id )
        self.assertEqual(batch.input_ids.shape[1] , 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )

    @require_torch
    def UpperCAmelCase_ ( self ) -> Dict:
        """simple docstring"""
        # Translation-input helper appends EOS + source code and forces the
        # target language code as the first generated token.
        _snake_case : List[Any] = self.tokenizer._build_translation_inputs(
            '''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
        self.assertEqual(
            nested_simplify(lowercase__ ) ,
            {
                # A, test, EOS, en_XX
                '''input_ids''': [[62, 3_034, 2, 250_004]],
                '''attention_mask''': [[1, 1, 1, 1]],
                # ar_AR
                '''forced_bos_token_id''': 250_001,
            } , )
47
'''simple docstring'''
# Cross-framework auto-model tests: every TFAutoModel* head is loaded from a
# PyTorch checkpoint (`from_pt`) and every AutoModel* head from a TF checkpoint
# (`from_tf`), asserting the loaded objects are non-None instances.
#
# NOTE(review): mechanical renaming broke this block — `lowercase__` and
# `model` are referenced but never bound, and the tuple-unpacking lines
# `_snake_case , _snake_case : Tuple = ...` annotate a tuple target, which is a
# SyntaxError in Python. Confirm against the upstream test file; code is left
# byte-identical here.
from __future__ import annotations

import unittest

from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow

if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPTaConfig,
        TaConfig,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeqaSeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPTaLMHeadModel,
        TFRobertaForMaskedLM,
        TFTaForConditionalGeneration,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST

if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeqaSeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPTaLMHeadModel,
        RobertaForMaskedLM,
        TaForConditionalGeneration,
    )


@is_pt_tf_cross_test
class lowerCamelCase (unittest.TestCase ):
    @slow
    def UpperCAmelCase_ ( self ) -> List[Any]:
        """simple docstring"""
        # Base model: TFAutoModel <-> AutoModel cross-loading.
        for model_name in ["bert-base-uncased"]:
            _snake_case : Union[str, Any] = AutoConfig.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Any = TFAutoModel.from_pretrained(lowercase__ , from_pt=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : str = AutoModel.from_pretrained(lowercase__ , from_tf=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )

    @slow
    def UpperCAmelCase_ ( self ) -> List[str]:
        """simple docstring"""
        # Pretraining head: TFAutoModelForPreTraining <-> AutoModelForPreTraining.
        for model_name in ["bert-base-uncased"]:
            _snake_case : Optional[Any] = AutoConfig.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Dict = TFAutoModelForPreTraining.from_pretrained(lowercase__ , from_pt=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Tuple = AutoModelForPreTraining.from_pretrained(lowercase__ , from_tf=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )

    @slow
    def UpperCAmelCase_ ( self ) -> Union[str, Any]:
        """simple docstring"""
        # Causal-LM head, also exercising output_loading_info on both sides.
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _snake_case : Optional[int] = AutoConfig.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : List[Any] = TFAutoModelForCausalLM.from_pretrained(lowercase__ , from_pt=lowercase__ )
            _snake_case , _snake_case : Tuple = TFAutoModelForCausalLM.from_pretrained(
                lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained(lowercase__ , from_tf=lowercase__ )
            _snake_case , _snake_case : Optional[Any] = AutoModelForCausalLM.from_pretrained(
                lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )

    @slow
    def UpperCAmelCase_ ( self ) -> Optional[Any]:
        """simple docstring"""
        # LM-head auto class: TFAutoModelWithLMHead <-> AutoModelWithLMHead.
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _snake_case : List[Any] = AutoConfig.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Tuple = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : int = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )

    @slow
    def UpperCAmelCase_ ( self ) -> Any:
        """simple docstring"""
        # Masked-LM head, with output_loading_info on both sides.
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _snake_case : List[str] = AutoConfig.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(lowercase__ , from_pt=lowercase__ )
            _snake_case , _snake_case : List[str] = TFAutoModelForMaskedLM.from_pretrained(
                lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : int = AutoModelForMaskedLM.from_pretrained(lowercase__ , from_tf=lowercase__ )
            _snake_case , _snake_case : Optional[int] = AutoModelForMaskedLM.from_pretrained(
                lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )

    @slow
    def UpperCAmelCase_ ( self ) -> List[str]:
        """simple docstring"""
        # Seq2seq LM head over the T5 archive, with output_loading_info.
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _snake_case : List[str] = AutoConfig.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase__ , from_pt=lowercase__ )
            _snake_case , _snake_case : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(
                lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : List[str] = AutoModelForSeqaSeqLM.from_pretrained(lowercase__ , from_tf=lowercase__ )
            _snake_case , _snake_case : Dict = AutoModelForSeqaSeqLM.from_pretrained(
                lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )

    @slow
    def UpperCAmelCase_ ( self ) -> Dict:
        """simple docstring"""
        # Sequence-classification head cross-loading.
        for model_name in ["bert-base-uncased"]:
            _snake_case : Any = AutoConfig.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Any = TFAutoModelForSequenceClassification.from_pretrained(lowercase__ , from_pt=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Dict = AutoModelForSequenceClassification.from_pretrained(lowercase__ , from_tf=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )

    @slow
    def UpperCAmelCase_ ( self ) -> Optional[int]:
        """simple docstring"""
        # Question-answering head cross-loading.
        for model_name in ["bert-base-uncased"]:
            _snake_case : str = AutoConfig.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : str = TFAutoModelForQuestionAnswering.from_pretrained(lowercase__ , from_pt=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Union[str, Any] = AutoModelForQuestionAnswering.from_pretrained(lowercase__ , from_tf=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )

    def UpperCAmelCase_ ( self ) -> str:
        """simple docstring"""
        # Tiny-model round trip: parameter count (14_410) is preserved across
        # TF->PT and PT->TF conversion, trainable-only count included.
        _snake_case : Union[str, Any] = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ )
        self.assertIsInstance(lowercase__ , lowercase__ )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
        _snake_case : Tuple = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ )
        self.assertIsInstance(lowercase__ , lowercase__ )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )

    def UpperCAmelCase_ ( self ) -> str:
        """simple docstring"""
        # Same parameter-count round trip for the second tiny checkpoint.
        _snake_case : List[str] = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ )
        self.assertIsInstance(lowercase__ , lowercase__ )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
        _snake_case : int = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ )
        self.assertIsInstance(lowercase__ , lowercase__ )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
47
1
'''simple docstring'''
# Cross-framework auto-model tests: every TFAutoModel* head is loaded from a
# PyTorch checkpoint (`from_pt`) and every AutoModel* head from a TF checkpoint
# (`from_tf`), asserting the loaded objects are non-None instances.
#
# NOTE(review): mechanical renaming broke this block — `lowercase__` and
# `model` are referenced but never bound, and the tuple-unpacking lines
# `_snake_case , _snake_case : Tuple = ...` annotate a tuple target, which is a
# SyntaxError in Python. Confirm against the upstream test file; code is left
# byte-identical here.
from __future__ import annotations

import unittest

from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow

if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPTaConfig,
        TaConfig,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeqaSeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPTaLMHeadModel,
        TFRobertaForMaskedLM,
        TFTaForConditionalGeneration,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST

if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeqaSeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPTaLMHeadModel,
        RobertaForMaskedLM,
        TaForConditionalGeneration,
    )


@is_pt_tf_cross_test
class lowerCamelCase (unittest.TestCase ):
    @slow
    def UpperCAmelCase_ ( self ) -> List[Any]:
        """simple docstring"""
        # Base model: TFAutoModel <-> AutoModel cross-loading.
        for model_name in ["bert-base-uncased"]:
            _snake_case : Union[str, Any] = AutoConfig.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Any = TFAutoModel.from_pretrained(lowercase__ , from_pt=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : str = AutoModel.from_pretrained(lowercase__ , from_tf=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )

    @slow
    def UpperCAmelCase_ ( self ) -> List[str]:
        """simple docstring"""
        # Pretraining head: TFAutoModelForPreTraining <-> AutoModelForPreTraining.
        for model_name in ["bert-base-uncased"]:
            _snake_case : Optional[Any] = AutoConfig.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Dict = TFAutoModelForPreTraining.from_pretrained(lowercase__ , from_pt=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Tuple = AutoModelForPreTraining.from_pretrained(lowercase__ , from_tf=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )

    @slow
    def UpperCAmelCase_ ( self ) -> Union[str, Any]:
        """simple docstring"""
        # Causal-LM head, also exercising output_loading_info on both sides.
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _snake_case : Optional[int] = AutoConfig.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : List[Any] = TFAutoModelForCausalLM.from_pretrained(lowercase__ , from_pt=lowercase__ )
            _snake_case , _snake_case : Tuple = TFAutoModelForCausalLM.from_pretrained(
                lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained(lowercase__ , from_tf=lowercase__ )
            _snake_case , _snake_case : Optional[Any] = AutoModelForCausalLM.from_pretrained(
                lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )

    @slow
    def UpperCAmelCase_ ( self ) -> Optional[Any]:
        """simple docstring"""
        # LM-head auto class: TFAutoModelWithLMHead <-> AutoModelWithLMHead.
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _snake_case : List[Any] = AutoConfig.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Tuple = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : int = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )

    @slow
    def UpperCAmelCase_ ( self ) -> Any:
        """simple docstring"""
        # Masked-LM head, with output_loading_info on both sides.
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _snake_case : List[str] = AutoConfig.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(lowercase__ , from_pt=lowercase__ )
            _snake_case , _snake_case : List[str] = TFAutoModelForMaskedLM.from_pretrained(
                lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : int = AutoModelForMaskedLM.from_pretrained(lowercase__ , from_tf=lowercase__ )
            _snake_case , _snake_case : Optional[int] = AutoModelForMaskedLM.from_pretrained(
                lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )

    @slow
    def UpperCAmelCase_ ( self ) -> List[str]:
        """simple docstring"""
        # Seq2seq LM head over the T5 archive, with output_loading_info.
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _snake_case : List[str] = AutoConfig.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase__ , from_pt=lowercase__ )
            _snake_case , _snake_case : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(
                lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : List[str] = AutoModelForSeqaSeqLM.from_pretrained(lowercase__ , from_tf=lowercase__ )
            _snake_case , _snake_case : Dict = AutoModelForSeqaSeqLM.from_pretrained(
                lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )

    @slow
    def UpperCAmelCase_ ( self ) -> Dict:
        """simple docstring"""
        # Sequence-classification head cross-loading.
        for model_name in ["bert-base-uncased"]:
            _snake_case : Any = AutoConfig.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Any = TFAutoModelForSequenceClassification.from_pretrained(lowercase__ , from_pt=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Dict = AutoModelForSequenceClassification.from_pretrained(lowercase__ , from_tf=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )

    @slow
    def UpperCAmelCase_ ( self ) -> Optional[int]:
        """simple docstring"""
        # Question-answering head cross-loading.
        for model_name in ["bert-base-uncased"]:
            _snake_case : str = AutoConfig.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : str = TFAutoModelForQuestionAnswering.from_pretrained(lowercase__ , from_pt=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Union[str, Any] = AutoModelForQuestionAnswering.from_pretrained(lowercase__ , from_tf=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )

    def UpperCAmelCase_ ( self ) -> str:
        """simple docstring"""
        # Tiny-model round trip: parameter count (14_410) is preserved across
        # TF->PT and PT->TF conversion, trainable-only count included.
        _snake_case : Union[str, Any] = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ )
        self.assertIsInstance(lowercase__ , lowercase__ )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
        _snake_case : Tuple = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ )
        self.assertIsInstance(lowercase__ , lowercase__ )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )

    def UpperCAmelCase_ ( self ) -> str:
        """simple docstring"""
        # Same parameter-count round trip for the second tiny checkpoint.
        _snake_case : List[str] = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ )
        self.assertIsInstance(lowercase__ , lowercase__ )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
        _snake_case : int = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ )
        self.assertIsInstance(lowercase__ , lowercase__ )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
47
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Lazy-import structure: the config is always importable, the model only when
# torch is available.
UpperCAmelCase : Dict = {'configuration_timm_backbone': ['TimmBackboneConfig']}
# Bug fix: `_LazyModule` below is handed `_import_structure`, but the mechanical
# renaming had replaced that binding with `UpperCAmelCase`, so importing this
# package raised NameError. Keep both names bound to the same dict.
_import_structure = UpperCAmelCase

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase : Union[str, Any] = ['TimmBackbone']
    # Bug fix: the modeling entry was assigned to a throwaway variable and never
    # attached to the import structure, so `TimmBackbone` could never resolve.
    _import_structure['modeling_timm_backbone'] = UpperCAmelCase

if TYPE_CHECKING:
    from .configuration_timm_backbone import TimmBackboneConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timm_backbone import TimmBackbone
else:
    import sys

    # Bug fix: the lazy module must replace this module in `sys.modules` to take
    # effect; the original only bound it to a local name.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
47
1
'''simple docstring''' import torch def _a ( ): """simple docstring""" if torch.cuda.is_available(): _snake_case : Tuple = torch.cuda.device_count() else: _snake_case : List[str] = 0 print(f'''Successfully ran on {num_gpus} GPUs''' ) if __name__ == "__main__": main()
47
'''PyTorch-Lightning base harness for Transformers fine-tuning (models, optimizers, schedulers, CLI args).'''
# NOTE(review): this module was mechanically renamed — identifiers collapsed to
# `_snake_case` / `lowercase__` / `UpperCAmelCase`. Consequences, preserved here
# byte-for-byte and flagged rather than silently rewritten:
#   * signatures repeat the same parameter name (e.g. several `lowercase__`),
#     which is a SyntaxError in Python;
#   * bodies still reference the ORIGINAL names (`config`, `tokenizer`, `model`,
#     `arg_to_scheduler`, `MODEL_MODES`, `scheduler`, `optimizer`, ...) which are
#     now dangling;
#   * attribute assignments (`self.step_count = ...` etc.) were turned into
#     assignments to the local `_snake_case`, so instance state is never stored.
# Restoring the original identifiers is required before this module can run.
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict

import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
    AdamW,
    AutoConfig,
    AutoModel,
    AutoModelForPreTraining,
    AutoModelForQuestionAnswering,
    AutoModelForSeqaSeqLM,
    AutoModelForSequenceClassification,
    AutoModelForTokenClassification,
    AutoModelWithLMHead,
    AutoTokenizer,
    PretrainedConfig,
    PreTrainedTokenizer,
)
from transformers.optimization import (
    Adafactor,
    get_cosine_schedule_with_warmup,
    get_cosine_with_hard_restarts_schedule_with_warmup,
    get_linear_schedule_with_warmup,
    get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version

# NOTE(review): all four module-level constants below are bound to the same
# repeatedly-shadowed name; code later references `arg_to_scheduler`,
# `arg_to_scheduler_choices` and `MODEL_MODES`, which no longer exist.
UpperCAmelCase : Tuple = logging.getLogger(__name__)
require_version('pytorch_lightning>=1.0.4')
# Task-mode -> AutoModel class dispatch table (originally `MODEL_MODES`).
UpperCAmelCase : str = {
    'base': AutoModel,
    'sequence-classification': AutoModelForSequenceClassification,
    'question-answering': AutoModelForQuestionAnswering,
    'pretraining': AutoModelForPreTraining,
    'token-classification': AutoModelForTokenClassification,
    'language-modeling': AutoModelWithLMHead,
    'summarization': AutoModelForSeqaSeqLM,
    'translation': AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
# Scheduler-name -> factory dispatch table (originally `arg_to_scheduler`).
UpperCAmelCase : Optional[Any] = {
    'linear': get_linear_schedule_with_warmup,
    'cosine': get_cosine_schedule_with_warmup,
    'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
    'polynomial': get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule,  # not supported for now
    # '': get_constant_schedule_with_warmup,  # not supported for now
}
# Originally `arg_to_scheduler_choices` / `arg_to_scheduler_metavar` for argparse.
UpperCAmelCase : Tuple = sorted(arg_to_scheduler.keys())
UpperCAmelCase : Optional[Any] = '{' + ', '.join(arg_to_scheduler_choices) + '}'


class lowerCamelCase (pl.LightningModule ):
    # Originally `BaseTransformer`: wraps an Auto* model, its config and tokenizer.
    def __init__( self , lowercase__ , lowercase__=None , lowercase__="base" , lowercase__=None , lowercase__=None , lowercase__=None , **lowercase__ , ) -> Optional[int]:
        """Build config/tokenizer/model from hparams unless explicit instances are given.

        NOTE(review): original parameters were (hparams, num_labels=None,
        mode="base", config=None, tokenizer=None, model=None, **config_kwargs) —
        the body below still references those names.
        """
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(lowercase__ )
        _snake_case : Union[str, Any] = 0  # originally `self.step_count`
        _snake_case : int = Path(self.hparams.output_dir )  # originally `self.output_dir`
        _snake_case : int = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            _snake_case : Tuple = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path ,
                **({'''num_labels''': num_labels} if num_labels is not None else {}) ,
                cache_dir=lowercase__ ,
                **lowercase__ ,
            )
        else:
            _snake_case : PretrainedConfig = config
        # Allow dropout-style hparams to override the pretrained config.
        _snake_case : Optional[Any] = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
        for p in extra_model_params:
            if getattr(self.hparams , lowercase__ , lowercase__ ):
                assert hasattr(self.config , lowercase__ ), F'''model config doesn\'t have a `{p}` attribute'''
                setattr(self.config , lowercase__ , getattr(self.hparams , lowercase__ ) )
        if tokenizer is None:
            _snake_case : Optional[int] = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path ,
                cache_dir=lowercase__ ,
            )
        else:
            _snake_case : PreTrainedTokenizer = tokenizer
        _snake_case : Any = MODEL_MODES[mode]
        if model is None:
            _snake_case : List[Any] = self.model_type.from_pretrained(
                self.hparams.model_name_or_path ,
                from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) ,
                config=self.config ,
                cache_dir=lowercase__ ,
            )
        else:
            _snake_case : Optional[Any] = model

    def UpperCAmelCase_ ( self , *lowercase__ , **lowercase__ ) -> List[str]:
        """Reload the model via `self.model_type.from_pretrained` (originally `load_hf_checkpoint`)."""
        _snake_case : Dict = self.model_type.from_pretrained(*lowercase__ , **lowercase__ )

    def UpperCAmelCase_ ( self ) -> List[Any]:
        """Build the LR scheduler selected by `hparams.lr_scheduler` (originally `get_lr_scheduler`)."""
        _snake_case : Optional[int] = arg_to_scheduler[self.hparams.lr_scheduler]
        _snake_case : Optional[int] = get_schedule_func(
            self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
        # Lightning consumes this dict form: step-wise scheduler, applied every step.
        _snake_case : str = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1}
        return scheduler

    def UpperCAmelCase_ ( self ) -> int:
        """Prepare AdamW/Adafactor with weight-decay groups (originally `configure_optimizers`)."""
        _snake_case : Any = self.model
        # Parameters matching these substrings are excluded from weight decay.
        _snake_case : List[Any] = ['''bias''', '''LayerNorm.weight''']
        _snake_case : List[str] = [
            {
                '''params''': [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
                ],  # check this named paramters
                '''weight_decay''': self.hparams.weight_decay,
            },
            {
                '''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
                '''weight_decay''': 0.0,
            },
        ]
        if self.hparams.adafactor:
            _snake_case : Any = Adafactor(
                lowercase__ , lr=self.hparams.learning_rate , scale_parameter=lowercase__ , relative_step=lowercase__ )
        else:
            _snake_case : List[str] = AdamW(
                lowercase__ , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
        _snake_case : List[str] = optimizer
        _snake_case : Any = self.get_lr_scheduler()
        return [optimizer], [scheduler]

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Any:
        """Test step delegates to the validation step (originally `test_step`)."""
        return self.validation_step(lowercase__ , lowercase__ )

    def UpperCAmelCase_ ( self , lowercase__ ) -> Tuple:
        """Test epoch-end delegates to validation aggregation (originally `test_epoch_end`)."""
        return self.validation_end(lowercase__ )

    def UpperCAmelCase_ ( self ) -> int:
        """Estimate total optimizer steps for scheduler warmup (originally `total_steps`)."""
        _snake_case : Any = max(1 , self.hparams.gpus )  # TODO: consider num_tpu_cores
        _snake_case : Optional[int] = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def UpperCAmelCase_ ( self , lowercase__ ) -> Any:
        """Record dataset size for the given Lightning stage (originally `setup`)."""
        if stage == "test":
            _snake_case : Any = len(self.test_dataloader().dataset )
        else:
            _snake_case : Dict = self.get_dataloader('''train''' , self.hparams.train_batch_size , shuffle=lowercase__ )
            _snake_case : Optional[int] = len(self.train_dataloader().dataset )

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ = False ) -> str:
        """Abstract hook: subclasses supply the task-specific DataLoader (originally `get_dataloader`)."""
        raise NotImplementedError('''You must implement this for your task''' )

    def UpperCAmelCase_ ( self ) -> Optional[int]:
        """Return the cached train loader (originally `train_dataloader`)."""
        return self.train_loader

    def UpperCAmelCase_ ( self ) -> Dict:
        """Dev-set loader, never shuffled (originally `val_dataloader`)."""
        return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=lowercase__ )

    def UpperCAmelCase_ ( self ) -> Optional[Any]:
        """Test-set loader, never shuffled (originally `test_dataloader`)."""
        return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=lowercase__ )

    def UpperCAmelCase_ ( self , lowercase__ ) -> Optional[int]:
        """Path of the cached-features file for a split (originally `_feature_file`)."""
        return os.path.join(
            self.hparams.data_dir ,
            '''cached_{}_{}_{}'''.format(
                lowercase__ ,
                list(filter(lowercase__ , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() ,
                str(self.hparams.max_seq_length ) ,
            ) ,
        )

    @pl.utilities.rank_zero_only
    def UpperCAmelCase_ ( self , lowercase__ ) -> None:
        """On checkpoint save, also export HF-format model+tokenizer to `best_tfmr` (rank 0 only)."""
        _snake_case : Dict = self.output_dir.joinpath('''best_tfmr''' )
        _snake_case : Tuple = self.step_count
        self.model.save_pretrained(lowercase__ )
        self.tokenizer.save_pretrained(lowercase__ )

    @staticmethod
    def UpperCAmelCase_ ( lowercase__ , lowercase__ ) -> Tuple:
        """Register model/optimizer CLI flags on `parser` (originally `add_model_specific_args(parser, root_dir)`)."""
        parser.add_argument(
            '''--model_name_or_path''' , default=lowercase__ , type=lowercase__ , required=lowercase__ ,
            help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
        parser.add_argument(
            '''--config_name''' , default='''''' , type=lowercase__ , help='''Pretrained config name or path if not the same as model_name''' )
        parser.add_argument(
            '''--tokenizer_name''' , default=lowercase__ , type=lowercase__ ,
            help='''Pretrained tokenizer name or path if not the same as model_name''' , )
        parser.add_argument(
            '''--cache_dir''' , default=str(Path(lowercase__ ).parent / '''test_run''' / '''cache''' ) , type=lowercase__ ,
            help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , )
        parser.add_argument(
            '''--encoder_layerdrop''' , type=lowercase__ ,
            help='''Encoder layer dropout probability (Optional). Goes into model.config''' , )
        parser.add_argument(
            '''--decoder_layerdrop''' , type=lowercase__ ,
            help='''Decoder layer dropout probability (Optional). Goes into model.config''' , )
        parser.add_argument(
            '''--dropout''' , type=lowercase__ , help='''Dropout probability (Optional). Goes into model.config''' , )
        parser.add_argument(
            '''--attention_dropout''' , type=lowercase__ , help='''Attention dropout probability (Optional). Goes into model.config''' , )
        parser.add_argument('''--learning_rate''' , default=5E-5 , type=lowercase__ , help='''The initial learning rate for Adam.''' )
        parser.add_argument(
            '''--lr_scheduler''' ,
            default='''linear''' ,
            # NOTE(review): originally `choices=arg_to_scheduler_choices, metavar=arg_to_scheduler_metavar`.
            choices=lowercase__ ,
            metavar=lowercase__ ,
            type=lowercase__ ,
            help='''Learning rate scheduler''' , )
        parser.add_argument('''--weight_decay''' , default=0.0 , type=lowercase__ , help='''Weight decay if we apply some.''' )
        parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=lowercase__ , help='''Epsilon for Adam optimizer.''' )
        parser.add_argument('''--warmup_steps''' , default=0 , type=lowercase__ , help='''Linear warmup over warmup_steps.''' )
        parser.add_argument('''--num_workers''' , default=4 , type=lowercase__ , help='''kwarg passed to DataLoader''' )
        parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=lowercase__ )
        parser.add_argument('''--train_batch_size''' , default=32 , type=lowercase__ )
        parser.add_argument('''--eval_batch_size''' , default=32 , type=lowercase__ )
        parser.add_argument('''--adafactor''' , action='''store_true''' )


class lowerCamelCase (pl.Callback ):
    # Originally `InitCallback`: initializes the RAG retriever on the master worker.
    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> str:
        """Hook (originally `on_sanity_check_start(trainer, pl_module)`)."""
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class lowerCamelCase (pl.Callback ):
    # Originally `CheckParamCallback`: prints parameter names with no gradient.
    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> List[str]:
        """After backward, report RAG parameters whose `.grad` is still None."""
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(lowercase__ )


class lowerCamelCase (pl.Callback ):
    # Originally `LoggingCallback`: logs LRs and validation/test metrics.
    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Any:
        """Log per-parameter-group learning rates (originally `on_batch_end`)."""
        _snake_case : Any = trainer.lr_schedulers[0]['''scheduler''']
        _snake_case : Optional[int] = {F'''lr_group_{i}''': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
        pl_module.logger.log_metrics(lowercase__ )

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> List[str]:
        """Pretty-print validation metrics (originally `on_validation_end`)."""
        rank_zero_info('''***** Validation results *****''' )
        _snake_case : Dict = trainer.callback_metrics
        # Log results
        for key in sorted(lowercase__ ):
            if key not in ["log", "progress_bar"]:
                rank_zero_info('''{} = {}\n'''.format(lowercase__ , str(metrics[key] ) ) )

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Dict:
        """Print test metrics and persist them to `test_results.txt` (originally `on_test_end`)."""
        rank_zero_info('''***** Test results *****''' )
        _snake_case : Dict = trainer.callback_metrics
        # Log and save results to file
        _snake_case : str = os.path.join(pl_module.hparams.output_dir , '''test_results.txt''' )
        with open(lowercase__ , '''w''' ) as writer:
            for key in sorted(lowercase__ ):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info('''{} = {}\n'''.format(lowercase__ , str(metrics[key] ) ) )
                    writer.write('''{} = {}\n'''.format(lowercase__ , str(metrics[key] ) ) )


def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
    """Register task-independent CLI flags (originally `add_generic_args(parser, root_dir)`)."""
    parser.add_argument(
        '''--output_dir''' ,
        default=str(Path(lowerCAmelCase_ ).parent / '''test_run''' / '''model_checkpoints''' ) ,
        type=lowerCAmelCase_ ,
        help='''The output directory where the model predictions and checkpoints will be written.''' , )
    parser.add_argument(
        '''--fp16''' ,
        action='''store_true''' ,
        help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
    parser.add_argument(
        '''--fp16_opt_level''' ,
        type=lowerCAmelCase_ ,
        default='''O2''' ,
        help=(
            '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
            '''See details at https://nvidia.github.io/apex/amp.html'''
        ) , )
    parser.add_argument('''--n_tpu_cores''' , dest='''tpu_cores''' , type=lowerCAmelCase_ )
    parser.add_argument('''--max_grad_norm''' , dest='''gradient_clip_val''' , default=1.0 , type=lowerCAmelCase_ , help='''Max gradient norm''' )
    parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
    parser.add_argument('''--do_predict''' , action='''store_true''' , help='''Whether to run predictions on the test set.''' )
    parser.add_argument(
        '''--gradient_accumulation_steps''' ,
        dest='''accumulate_grad_batches''' ,
        type=lowerCAmelCase_ ,
        default=1 ,
        help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
    parser.add_argument('''--seed''' , type=lowerCAmelCase_ , default=42 , help='''random seed for initialization''' )
    parser.add_argument(
        '''--data_dir''' ,
        default=str(Path(lowerCAmelCase_ ).parent / '''test_run''' / '''dummy-train-data''' ) ,
        type=lowerCAmelCase_ ,
        help='''The input data dir. Should contain the training files for the CoNLL-2003 NER task.''' , )


def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=True , lowerCAmelCase_=[] , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ , ):
    """Build a pl.Trainer around `model` and optionally fit it (originally `generic_train`).

    NOTE(review): original parameters were (model, args, early_stopping_callback=None,
    logger=True, extra_callbacks=[], checkpoint_callback=None, logging_callback=None,
    **extra_train_kwargs); the body still uses those names. Also `args.fpaa` looks
    like a mangled `args.fp16` (argparse dest from '--fp16') — confirm before use.
    """
    pl.seed_everything(args.seed )
    # init model
    _snake_case : Union[str, Any] = Path(model.hparams.output_dir )
    odir.mkdir(exist_ok=lowerCAmelCase_ )
    # add custom checkpoints
    if checkpoint_callback is None:
        _snake_case : Any = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir , prefix='''checkpoint''' , monitor='''val_loss''' , mode='''min''' , save_top_k=1 )
    if early_stopping_callback:
        extra_callbacks.append(lowerCAmelCase_ )
    if logging_callback is None:
        _snake_case : str = LoggingCallback()
    _snake_case : Tuple = {}
    if args.fpaa:
        _snake_case : Union[str, Any] = 16
    if args.gpus > 1:
        _snake_case : Optional[Any] = '''auto'''
        _snake_case : Tuple = '''ddp'''
    _snake_case : Optional[Any] = args.accumulate_grad_batches
    _snake_case : Tuple = None
    _snake_case : str = '''auto'''
    _snake_case : int = pl.Trainer.from_argparse_args(
        lowerCAmelCase_ ,
        weights_summary=lowerCAmelCase_ ,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] ,
        logger=lowerCAmelCase_ ,
        val_check_interval=1 ,
        num_sanity_val_steps=2 ,
        **lowerCAmelCase_ ,
    )
    if args.do_train:
        trainer.fit(lowerCAmelCase_ )
    else:
        print('''RAG modeling tests with new set functions successfuly executed!''' )
    return trainer
47
1
'''Seq2seq training callbacks: parameter counting, checkpoint/early-stopping factories, metric logging.'''
# NOTE(review): mechanically renamed module — several signatures repeat the same
# parameter name (SyntaxError) and bodies reference the original identifiers
# (`model`, `metric`, `params`, `trainer`, `pl_module`, ...), now dangling.
# Code preserved byte-for-byte; restore the original names before running.
import logging
from pathlib import Path

import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only

from utils_rag import save_json


def _a ( lowerCAmelCase_ ):
    """Count trainable parameters of `model` (originally `count_trainable_parameters(model)`)."""
    _snake_case : List[Any] = filter(lambda lowerCAmelCase_ : p.requires_grad , model.parameters() )
    _snake_case : str = sum([np.prod(p.size() ) for p in model_parameters] )
    return params


UpperCAmelCase : Dict = logging.getLogger(__name__)  # originally `logger`


def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
    """Build a ModelCheckpoint tracking `val_<metric>` (originally `get_checkpoint_callback(output_dir, metric)`)."""
    if metric == "rouge2":
        _snake_case : Optional[Any] = '''{val_avg_rouge2:.4f}-{step_count}'''
    elif metric == "bleu":
        _snake_case : Any = '''{val_avg_bleu:.4f}-{step_count}'''
    elif metric == "em":
        _snake_case : Optional[int] = '''{val_avg_em:.4f}-{step_count}'''
    elif metric == "loss":
        _snake_case : Union[str, Any] = '''{val_avg_loss:.4f}-{step_count}'''
    else:
        raise NotImplementedError(
            f'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'''
            ''' function.'''
        )
    _snake_case : List[str] = ModelCheckpoint(
        dirpath=lowerCAmelCase_ ,
        filename=lowerCAmelCase_ ,
        monitor=f'''val_{metric}''' ,
        mode='''max''' ,
        save_top_k=1 ,
        every_n_epochs=1 ,
    )
    return checkpoint_callback


def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
    """Early stopping on `val_<metric>`; minimize only for loss (originally `get_early_stopping_callback(metric, patience)`)."""
    return EarlyStopping(
        monitor=f'''val_{metric}''' ,
        mode='''min''' if '''loss''' in metric else '''max''' ,
        patience=lowerCAmelCase_ ,
        verbose=lowerCAmelCase_ ,
    )


class lowerCamelCase (pl.Callback ):
    # Originally `Seq2SeqLoggingCallback`.
    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> List[str]:
        """Log per-group learning rates at batch end (originally `on_batch_end(trainer, pl_module)`)."""
        _snake_case : Tuple = {F'''lr_group_{i}''': param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(lowercase__ )

    @rank_zero_only
    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__=True ) -> None:
        """Write callback metrics (and optionally generations) to per-split files (originally `_write_logs`)."""
        logger.info(F'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
        _snake_case : Dict = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} )
        # Log results
        _snake_case : int = Path(pl_module.hparams.output_dir )
        if type_path == "test":
            _snake_case : str = od / '''test_results.txt'''
            _snake_case : Union[str, Any] = od / '''test_generations.txt'''
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            _snake_case : Optional[int] = od / F'''{type_path}_results/{trainer.global_step:05d}.txt'''
            _snake_case : Optional[int] = od / F'''{type_path}_generations/{trainer.global_step:05d}.txt'''
        results_file.parent.mkdir(exist_ok=lowercase__ )
        generations_file.parent.mkdir(exist_ok=lowercase__ )
        with open(lowercase__ , '''a+''' ) as writer:
            for key in sorted(lowercase__ ):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                _snake_case : Optional[Any] = metrics[key]
                if isinstance(lowercase__ , torch.Tensor ):
                    _snake_case : Optional[int] = val.item()
                _snake_case : int = F'''{key}: {val:.6f}\n'''
                writer.write(lowercase__ )
        if not save_generations:
            return
        if "preds" in metrics:
            _snake_case : Dict = '''\n'''.join(metrics['''preds'''] )
            generations_file.open('''w+''' ).write(lowercase__ )

    @rank_zero_only
    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> List[Any]:
        """Log total and trainable parameter counts in millions (originally `on_train_start`)."""
        try:
            _snake_case : Dict = pl_module.model.model.num_parameters()
        except AttributeError:
            # Some wrappers expose num_parameters one level up.
            _snake_case : str = pl_module.model.num_parameters()
        _snake_case : Any = count_trainable_parameters(lowercase__ )
        # mp stands for million parameters
        trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1E6, '''grad_mp''': n_trainable_pars / 1E6} )

    @rank_zero_only
    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Optional[int]:
        """Persist metrics JSON and write test logs (originally `on_test_end`)."""
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        return self._write_logs(lowercase__ , lowercase__ , '''test''' )

    @rank_zero_only
    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> List[str]:
        """Persist metrics JSON at validation end (originally `on_validation_end`)."""
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
47
'''SEW-D model configuration (ASAPP's squeezed-and-efficient wav2vec with DeBERTa attention).'''
# NOTE(review): mechanically renamed module — the `__init__` below repeats the
# parameter name `lowercase__` dozens of times (SyntaxError) while the body still
# references the ORIGINAL parameter names (`hidden_size`, `conv_bias`, ...), and
# every `self.<attr> = ...` assignment became a local `_snake_case` binding, so no
# configuration state is actually stored. The base class `a__` is also dangling
# (originally `PretrainedConfig`). Code preserved byte-for-byte.
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


UpperCAmelCase : List[str] = logging.get_logger(__name__)  # originally `logger`
# Originally `SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP`.
UpperCAmelCase : Dict = {
    'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}


class lowerCamelCase (a__ ):
    # Originally `SEWDConfig(PretrainedConfig)`; `model_type` identifier for AutoConfig.
    _lowercase : List[str] = """sew-d"""

    def __init__( self , lowercase__=32 , lowercase__=768 , lowercase__=12 , lowercase__=12 , lowercase__=3_072 , lowercase__=2 , lowercase__=512 , lowercase__=256 , lowercase__=True , lowercase__=True , lowercase__=("p2c", "c2p") , lowercase__="layer_norm" , lowercase__="gelu_python" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.0 , lowercase__=0.1 , lowercase__=0.02 , lowercase__=1E-7 , lowercase__=1E-5 , lowercase__="group" , lowercase__="gelu" , lowercase__=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowercase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowercase__=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowercase__=False , lowercase__=128 , lowercase__=16 , lowercase__=True , lowercase__=0.05 , lowercase__=10 , lowercase__=2 , lowercase__=0.0 , lowercase__=10 , lowercase__=0 , lowercase__="mean" , lowercase__=False , lowercase__=False , lowercase__=256 , lowercase__=0 , lowercase__=1 , lowercase__=2 , **lowercase__ , ) -> Dict:
        """Store SEW-D hyperparameters; validates that the conv layer specs have matching lengths.

        NOTE(review): defaults above line up (in order) with the upstream SEWDConfig
        parameters starting at vocab_size=32, hidden_size=768, ... — confirm
        against the original before restoring names.
        """
        super().__init__(**lowercase__ , pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ )
        _snake_case : List[str] = hidden_size
        _snake_case : Optional[Any] = feat_extract_norm
        _snake_case : Tuple = feat_extract_activation
        _snake_case : Tuple = list(lowercase__ )  # conv_dim
        _snake_case : Any = list(lowercase__ )  # conv_stride
        _snake_case : Any = list(lowercase__ )  # conv_kernel
        _snake_case : Any = conv_bias
        _snake_case : List[Any] = num_conv_pos_embeddings
        _snake_case : Any = num_conv_pos_embedding_groups
        _snake_case : Union[str, Any] = len(self.conv_dim )  # num_feat_extract_layers
        _snake_case : Optional[Any] = num_hidden_layers
        _snake_case : Optional[int] = intermediate_size
        _snake_case : Any = squeeze_factor
        _snake_case : Optional[Any] = max_position_embeddings
        _snake_case : Tuple = position_buckets
        _snake_case : Tuple = share_att_key
        _snake_case : Any = relative_attention
        _snake_case : Optional[int] = norm_rel_ebd
        _snake_case : Optional[Any] = list(lowercase__ )  # pos_att_type
        _snake_case : List[Any] = hidden_act
        _snake_case : List[Any] = num_attention_heads
        _snake_case : Dict = hidden_dropout
        _snake_case : Tuple = attention_dropout
        _snake_case : Union[str, Any] = activation_dropout
        _snake_case : List[Any] = feat_proj_dropout
        _snake_case : Optional[int] = final_dropout
        _snake_case : Optional[Any] = layer_norm_eps
        _snake_case : Dict = feature_layer_norm_eps
        _snake_case : List[Any] = initializer_range
        _snake_case : Dict = vocab_size
        # All three conv specs must describe the same number of feature-extractor layers.
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect.'''
                '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
                F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
                F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.'''
            )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        _snake_case : Union[str, Any] = apply_spec_augment
        _snake_case : Any = mask_time_prob
        _snake_case : List[str] = mask_time_length
        _snake_case : Dict = mask_time_min_masks
        _snake_case : Union[str, Any] = mask_feature_prob
        _snake_case : Tuple = mask_feature_length
        _snake_case : Union[str, Any] = mask_feature_min_masks
        # ctc loss
        _snake_case : Optional[Any] = ctc_loss_reduction
        _snake_case : Optional[Any] = ctc_zero_infinity
        # sequence classification
        _snake_case : List[Any] = use_weighted_layer_sum
        _snake_case : Any = classifier_proj_size

    @property
    def UpperCAmelCase_ ( self ) -> Any:
        """Total stride of the conv feature extractor (product of per-layer strides) — originally `inputs_to_logits_ratio`."""
        return functools.reduce(operator.mul , self.conv_stride , 1 )
47
1
'''simple docstring''' def _a ( lowerCAmelCase_ ): """simple docstring""" if len(lowerCAmelCase_ ) < 2: return collection def circle_sort_util(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> bool: _snake_case : List[str] = False if low == high: return swapped _snake_case : Any = low _snake_case : Optional[int] = high while left < right: if collection[left] > collection[right]: _snake_case , _snake_case : Union[str, Any] = ( collection[right], collection[left], ) _snake_case : List[str] = True left += 1 right -= 1 if left == right and collection[left] > collection[right + 1]: _snake_case , _snake_case : Optional[Any] = ( collection[right + 1], collection[left], ) _snake_case : List[Any] = True _snake_case : Union[str, Any] = low + int((high - low) / 2 ) _snake_case : Dict = circle_sort_util(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Optional[Any] = circle_sort_util(lowerCAmelCase_ , mid + 1 , lowerCAmelCase_ ) return swapped or left_swap or right_swap _snake_case : int = True while is_not_sorted is True: _snake_case : List[str] = circle_sort_util(lowerCAmelCase_ , 0 , len(lowerCAmelCase_ ) - 1 ) return collection if __name__ == "__main__": UpperCAmelCase : Optional[Any] = input('Enter numbers separated by a comma:\n').strip() UpperCAmelCase : Optional[int] = [int(item) for item in user_input.split(',')] print(circle_sort(unsorted))
47
'''simple docstring''' from random import randint from tempfile import TemporaryFile import numpy as np def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : List[Any] = 0 if start < end: _snake_case : List[Any] = randint(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Any = a[end] _snake_case : List[str] = a[pivot] _snake_case : Optional[int] = temp _snake_case , _snake_case : List[Any] = _in_place_partition(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) count += _in_place_quick_sort(lowerCAmelCase_ , lowerCAmelCase_ , p - 1 ) count += _in_place_quick_sort(lowerCAmelCase_ , p + 1 , lowerCAmelCase_ ) return count def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[Any] = 0 _snake_case : Optional[int] = randint(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Tuple = a[end] _snake_case : Optional[Any] = a[pivot] _snake_case : Union[str, Any] = temp _snake_case : Union[str, Any] = start - 1 for index in range(lowerCAmelCase_ , lowerCAmelCase_ ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value _snake_case : Optional[int] = new_pivot_index + 1 _snake_case : Optional[Any] = a[new_pivot_index] _snake_case : Tuple = a[index] _snake_case : str = temp _snake_case : Any = a[new_pivot_index + 1] _snake_case : str = a[end] _snake_case : Optional[int] = temp return new_pivot_index + 1, count UpperCAmelCase : Dict = TemporaryFile() UpperCAmelCase : Dict = 1_0_0 # 1000 elements are to be sorted UpperCAmelCase, UpperCAmelCase : str = 0, 1 # mean and standard deviation UpperCAmelCase : Optional[Any] = np.random.normal(mu, sigma, p) np.save(outfile, X) print('The array is') print(X) outfile.seek(0) # using the same array UpperCAmelCase : int = np.load(outfile) UpperCAmelCase : Optional[int] = len(M) - 1 UpperCAmelCase : str = _in_place_quick_sort(M, 0, r) print( 'No of Comparisons for 100 elements selected from a standard normal 
distribution' 'is :' ) print(z)
47
1
"""Lazy-import package init for the NLLB-MoE model.

Fixes:
- the import-structure dict and its torch-only extension were assigned to a
  repeatedly-shadowed throwaway name while ``_LazyModule`` referenced the
  undefined ``_import_structure`` (NameError at import time);
- the ``_LazyModule`` instance must be installed as this module via
  ``sys.modules[__name__]`` rather than bound to a local name.

NOTE(review): the lazy list advertises ``NllbMoeTop2Router`` while the
TYPE_CHECKING branch imports ``NllbMoeTopaRouter`` — one of the two is a
mangled name; confirm against ``modeling_nllb_moe`` before shipping.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Mapping of submodule name -> public names exported from it.
_import_structure = {
    'configuration_nllb_moe': [
        'NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'NllbMoeConfig',
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # The modeling code needs torch; only advertise it when torch is present.
    _import_structure['modeling_nllb_moe'] = [
        'NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST',
        'NllbMoeForConditionalGeneration',
        'NllbMoeModel',
        'NllbMoePreTrainedModel',
        'NllbMoeTop2Router',
        'NllbMoeSparseMLP',
    ]

if TYPE_CHECKING:
    from .configuration_nllb_moe import (
        NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        NllbMoeConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTopaRouter,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
47
"""Package init for the VQ-Diffusion pipeline.

The pipeline depends on both `transformers` and `torch`; its public classes are
re-exported here only when both libraries are importable.
"""
from ...utils import is_torch_available, is_transformers_available


# Export the pipeline classes only when every runtime dependency is present;
# otherwise this package intentionally exposes nothing.
if is_transformers_available() and is_torch_available():
    from .pipeline_vq_diffusion import (
        LearnedClassifierFreeSamplingEmbeddings,
        VQDiffusionPipeline,
    )
47
1
"""Convert an official Bort checkpoint (MXNet / GluonNLP) to a PyTorch Transformers BERT checkpoint."""
import argparse
import os

import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
    BertIntermediate,
    BertLayer,
    BertOutput,
    BertSelfAttention,
    BertSelfOutput,
)
from transformers.utils import logging

# The conversion relies on private GluonNLP/MXNet internals, so the versions are pinned hard.
if version.parse(nlp.__version__) != version.parse('0.8.3'):
    raise Exception('requires gluonnlp == 0.8.3')
if version.parse(mx.__version__) != version.parse('1.5.0'):
    raise Exception('requires mxnet == 1.5.0')

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Sentence used at the end to verify both models produce the same activations.
SAMPLE_TEXT = 'The Nymphenburg Palace is a beautiful palace in Munich!'


def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    """Load a GluonNLP Bort checkpoint, copy its weights into a Transformers BERT model,
    save the result to ``pytorch_dump_folder_path`` and sanity-check the outputs.

    Args:
        bort_checkpoint_path: path to the official Bort ``.params`` file.
        pytorch_dump_folder_path: output directory for the converted PyTorch model.
    """
    # Hyper-parameters of the published bort_4_8_768_1024 architecture.
    bort_4_8_768_1024_hparams = {
        'attention_cell': 'multi_head',
        'num_layers': 4,
        'units': 1_024,
        'hidden_size': 768,
        'max_length': 512,
        'num_heads': 8,
        'scaled': True,
        'dropout': 0.1,
        'use_residual': True,
        'embed_size': 1_024,
        'embed_dropout': 0.1,
        'word_embed': None,
        'layer_norm_eps': 1e-5,
        'token_type_vocab_size': 2,
    }

    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args['attention_cell'],
        num_layers=predefined_args['num_layers'],
        units=predefined_args['units'],
        hidden_size=predefined_args['hidden_size'],
        max_length=predefined_args['max_length'],
        num_heads=predefined_args['num_heads'],
        scaled=predefined_args['scaled'],
        dropout=predefined_args['dropout'],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args['use_residual'],
        activation=predefined_args.get('activation', 'gelu'),
        layer_norm_eps=predefined_args.get('layer_norm_eps', None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = 'openwebtext_ccnews_stories_books_cased'

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), 'models')
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args['units'],
        embed_size=predefined_args['embed_size'],
        embed_dropout=predefined_args['embed_dropout'],
        word_embed=predefined_args['word_embed'],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args['token_type_vocab_size'],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    # Mapping from Gluon parameter names (with prefix) to MXNet parameters.
    params = original_bort._collect_params_with_prefix()

    # Build our config 🤗
    hf_bort_config_json = {
        'architectures': ['BertForMaskedLM'],
        'attention_probs_dropout_prob': predefined_args['dropout'],
        'hidden_act': 'gelu',
        'hidden_dropout_prob': predefined_args['dropout'],
        'hidden_size': predefined_args['embed_size'],
        'initializer_range': 0.02,
        'intermediate_size': predefined_args['hidden_size'],
        'layer_norm_eps': predefined_args['layer_norm_eps'],
        'max_position_embeddings': predefined_args['max_length'],
        'model_type': 'bort',
        'num_attention_heads': predefined_args['num_heads'],
        'num_hidden_layers': predefined_args['num_layers'],
        'pad_token_id': 1,  # 2 = BERT, 1 = RoBERTa
        'type_vocab_size': 1,  # 2 = BERT, 1 = RoBERTa
        'vocab_size': len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()

    # Parameter mapping table (Gluonnlp to Transformers)
    # * denotes layer index
    #
    # | Gluon Parameter                                                | Transformers Parameter
    # | -------------------------------------------------------------- | ----------------------
    # | `encoder.layer_norm.beta`                                      | `bert.embeddings.LayerNorm.bias`
    # | `encoder.layer_norm.gamma`                                     | `bert.embeddings.LayerNorm.weight`
    # | `encoder.position_weight`                                      | `bert.embeddings.position_embeddings.weight`
    # | `word_embed.0.weight`                                          | `bert.embeddings.word_embeddings.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_key.bias`     | `bert.encoder.layer.*.attention.self.key.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_key.weight`   | `bert.encoder.layer.*.attention.self.key.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_query.bias`   | `bert.encoder.layer.*.attention.self.query.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_value.bias`   | `bert.encoder.layer.*.attention.self.value.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
    # | `encoder.transformer_cells.*.ffn.ffn_2.bias`                   | `bert.encoder.layer.*.attention.output.dense.bias`
    # | `encoder.transformer_cells.*.ffn.ffn_2.weight`                 | `bert.encoder.layer.*.attention.output.dense.weight`
    # | `encoder.transformer_cells.*.layer_norm.beta`                  | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
    # | `encoder.transformer_cells.*.layer_norm.gamma`                 | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
    # | `encoder.transformer_cells.*.ffn.ffn_1.bias`                   | `bert.encoder.layer.*.intermediate.dense.bias`
    # | `encoder.transformer_cells.*.ffn.ffn_1.weight`                 | `bert.encoder.layer.*.intermediate.dense.weight`
    # | `encoder.transformer_cells.*.ffn.layer_norm.beta`              | `bert.encoder.layer.*.output.LayerNorm.bias`
    # | `encoder.transformer_cells.*.ffn.layer_norm.gamma`             | `bert.encoder.layer.*.output.LayerNorm.weight`
    # | `encoder.transformer_cells.*.proj.bias`                        | `bert.encoder.layer.*.output.dense.bias`
    # | `encoder.transformer_cells.*.proj.weight`                      | `bert.encoder.layer.*.output.dense.weight`

    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f'The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'

        return gluon_param

    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, 'word_embed.0.weight'
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, 'encoder.position_weight'
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, 'encoder.layer_norm.beta'
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, 'encoder.layer_norm.gamma'
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )

    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f'encoder.transformer_cells.{i}.attention_cell.proj_key.bias'
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f'encoder.transformer_cells.{i}.attention_cell.proj_key.weight'
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f'encoder.transformer_cells.{i}.attention_cell.proj_query.bias'
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f'encoder.transformer_cells.{i}.attention_cell.proj_query.weight'
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f'encoder.transformer_cells.{i}.attention_cell.proj_value.bias'
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f'encoder.transformer_cells.{i}.attention_cell.proj_value.weight'
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f'encoder.transformer_cells.{i}.proj.bias'
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f'encoder.transformer_cells.{i}.proj.weight'
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f'encoder.transformer_cells.{i}.layer_norm.beta'
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f'encoder.transformer_cells.{i}.layer_norm.gamma'
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f'encoder.transformer_cells.{i}.ffn.ffn_1.bias'
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f'encoder.transformer_cells.{i}.ffn.ffn_1.weight'
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f'encoder.transformer_cells.{i}.ffn.ffn_2.bias'
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f'encoder.transformer_cells.{i}.ffn.ffn_2.weight'
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f'encoder.transformer_cells.{i}.ffn.layer_norm.beta'
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f'encoder.transformer_cells.{i}.ffn.layer_norm.gamma'
        )

    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained('roberta-base')

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)['input_ids']

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors='pt')
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print('✔️ Both model do output the same tensors')
    else:
        print('❌ Both model do **NOT** output the same tensors')
        print('Absolute difference is:', max_absolute_diff)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--bort_checkpoint_path', default=None, type=str, required=True, help='Path the official Bort params file.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
47
"""Entry point module for the ``transformers-cli`` command-line tool."""
from argparse import ArgumentParser

from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands


def main():
    """Parse the CLI arguments, dispatch to the selected sub-command and run it."""
    parser = ArgumentParser('Transformers CLI tool', usage='transformers-cli <command> [<args>]')
    commands_parser = parser.add_subparsers(help='transformers-cli command helpers')

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()
    if not hasattr(args, 'func'):
        # No sub-command given: show usage and exit with an error status.
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
47
1
"""Lazy-import init for the FocalNet model package."""
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Maps submodule name -> names it exports; consumed by _LazyModule below.
_import_structure = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch not installed: the modeling symbols are simply not exposed.
    pass
else:
    _import_structure['modeling_focalnet'] = [
        'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
        'FocalNetForImageClassification',
        'FocalNetForMaskedImageModeling',
        'FocalNetBackbone',
        'FocalNetModel',
        'FocalNetPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers get the real imports.
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy submodules
    # are only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
47
"""Find the index of the first Fibonacci term with a given number of digits."""
from collections.abc import Generator


def fibonacci_generator() -> Generator[int, None, None]:
    """Yield Fibonacci numbers 1, 2, 3, 5, 8, ...

    Note: only a single ``1`` is yielded — the stream starts at F(2) in the
    1-indexed convention F(1) = F(2) = 1.
    """
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1_000) -> int:
    """Return the 1-based index of the first Fibonacci term with ``n`` digits.

    >>> solution(3)
    12
    """
    answer = 1
    gen = fibonacci_generator()
    # Count terms until the next yielded Fibonacci number has >= n digits.
    while len(str(next(gen))) < n:
        answer += 1
    # +1 compensates for the generator starting at F(2) rather than F(1).
    return answer + 1


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
47
1
"""RoBERTa model configuration."""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
    'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
    'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
    'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
    'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
    'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}


class RobertaConfig(PretrainedConfig):
    """Configuration for a RoBERTa model.

    Stores the architecture hyper-parameters; defaults reproduce ``roberta-base``.
    """

    model_type = 'roberta'

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type='absolute',
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Describe the dynamic axes of the ONNX graph inputs for this task."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )
47
"""Helper functions for quantization-aware training with NVIDIA pytorch-quantization."""
import logging
import re

import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor

logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names


def add_arguments(parser):
    """Add quant_trainer command-line arguments to an argparse parser."""
    group = parser.add_argument_group('quant_trainer arguments')
    group.add_argument('--wprec', type=int, default=8, help='weight precision')
    group.add_argument('--aprec', type=int, default=8, help='activation precision')
    group.add_argument('--quant-per-tensor', action='store_true', help='per tensor weight scaling')
    group.add_argument('--quant-disable', action='store_true', help='disable all quantizers')
    group.add_argument('--quant-disable-embeddings', action='store_true', help='disable all embeddings quantizers')
    group.add_argument('--quant-disable-keyword', type=str, nargs='+', help='disable quantizers by keyword')
    group.add_argument('--quant-disable-layer-module', type=str, help='disable quantizers by keyword under layer.')
    group.add_argument('--quant-enable-layer-module', type=str, help='enable quantizers by keyword under layer')
    group.add_argument('--calibrator', default='max', help='which quantization range calibrator to use')
    group.add_argument('--percentile', default=None, type=float, help='percentile for PercentileCalibrator')
    group.add_argument('--fuse-qkv', action='store_true', help='use the same scale factor for qkv')
    group.add_argument('--clip-gelu', metavar='N', type=float, help='clip gelu output maximum value to N')
    group.add_argument(
        '--recalibrate-weights',
        action='store_true',
        help=(
            'recalibrate weight amaxes by taking the max of the weights.'
            ' amaxes will be computed with the current quantization granularity (axis).'
        ),
    )


def set_default_quantizers(args):
    """Install default input/weight QuantDescriptors; call before building the model."""
    if args.calibrator == "max":
        calib_method = 'max'
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError('Specify --percentile when using percentile calibrator')
        calib_method = 'histogram'
    elif args.calibrator == "mse":
        calib_method = 'histogram'
    else:
        raise ValueError(f'Invalid calibrator {args.calibrator}')

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    # Per-tensor scaling uses axis=None; otherwise quantize per output channel (axis 0).
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)


def configure_model(model, args, calib=False, eval=False):
    """Enable/disable quantizers and apply post-processing before the training loop."""
    logger.info('Configuring Model for Quantization')
    logger.info(f'using quantization package {pytorch_quantization.__file__}')

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ['embeddings'], which='weight', _disabled=True)

        if args.quant_disable:
            set_quantizer_by_name(model, [''], _disabled=True)

        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)

        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r'layer.\d+.' + args.quant_disable_layer_module], _disabled=True)

        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r'layer.\d+.' + args.quant_enable_layer_module], _disabled=False)

        if args.recalibrate_weights:
            recalibrate_weights(model)

        if args.fuse_qkv:
            fuse_qkv(model, args)

        if args.clip_gelu:
            clip_gelu(model, args.clip_gelu)

    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model)


def enable_calibration(model):
    """Switch all quantizers into calibration mode (collect statistics, no quantization)."""
    logger.info('Enabling Calibration')
    for name, module in model.named_modules():
        if name.endswith('_quantizer'):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f'{name:80}: {module}')


def finish_calibration(model, args):
    """Load the calibrated amax values and switch quantizers back to quantization mode."""
    logger.info('Loading calibrated amax')
    for name, module in model.named_modules():
        if name.endswith('_quantizer'):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax('percentile', percentile=args.percentile)
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model)


def fuse_qkv(model, args):
    """Share a single scale factor across the Q/K/V projections of each attention block."""

    def fusea(qq, qk, qv):
        # Replace the amax of each quantizer with the max over all three.
        for mod in [qq, qk, qv]:
            if not hasattr(mod, '_amax'):
                print(' WARNING: NO AMAX BUFFER')
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()

        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}')

    for name, mod in model.named_modules():
        if name.endswith('.attention.self'):
            logger.info(f'FUSE_QKV: {name:{name_width}}')
            fusea(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fusea(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)


def clip_gelu(model, maxval):
    """Clamp the input amax of every FFN output projection (GELU output) to `maxval`."""
    for name, mod in model.named_modules():
        if name.endswith('.output.dense') and not name.endswith('attention.output.dense'):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f'CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}')


def expand_amax(model):
    """Expand a per-tensor amax into a per-channel amax vector for axis-quantized weights."""
    for name, mod in model.named_modules():
        if hasattr(mod, '_weight_quantizer') and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f'expanding {name} {amax} -> {mod._weight_quantizer._amax}')


def recalibrate_weights(model):
    """Recompute weight amaxes from the current weights at the current granularity."""
    for name, mod in model.named_modules():
        if hasattr(mod, '_weight_quantizer'):
            # NOTE(review): upstream uses `mod.weight_quantizer` (no underscore) here —
            # preserved as-is; confirm intended attribute before changing.
            if not hasattr(mod.weight_quantizer, '_amax'):
                print('RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER')
                continue

            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f'RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}')
            mod._weight_quantizer._amax = amax


def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Print one line per quantized layer showing its activation and weight quantizers."""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]

    # Widen the name column to fit the longest layer name.
    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, 'weight'):
            continue
        name_width = max(name_width, len(name))

    for name, mod in model.named_modules():
        input_q = getattr(mod, '_input_quantizer', None)
        weight_q = getattr(mod, '_weight_quantizer', None)
        if not hasattr(mod, 'weight'):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f'Act:{input_q.extra_repr()}'
        wgt_str = f'Wgt:{weight_q.extra_repr()}'
        s = f'{name:{name_width}} {act_str} {wgt_str}'
        if len(s) <= line_width:
            logger.info(s)
        else:
            # Too long for one line: split activation and weight info.
            logger.info(f'{name:{name_width}} {act_str}')
            logger.info(f'{" ":{name_width}} {wgt_str}')


def print_quant_summary(model):
    """Print a summary of every TensorQuantizer module in the model."""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f'{name:80} {mod}')
            count += 1
    print(f'{count} TensorQuantizers found in model')


def set_quantizer(name, mod, quantizer, k, v):
    """Set attribute ``k = v`` on ``mod.<quantizer>`` if that quantizer exists."""
    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f'{name} has no {quantizer}')


def set_quantizers(name, mod, which="both", **kwargs):
    """Set attributes on the input and/or weight quantizer of one module."""
    s = f'Warning: changing {which} quantizers of {name:{qname_width}}'
    for k, v in kwargs.items():
        s += f' {k}={v}'
        if which in ["input", "both"]:
            set_quantizer(name, mod, '_input_quantizer', k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, '_weight_quantizer', k, v)
    logger.info(s)


def set_quantizer_by_name(model, names, **kwargs):
    """Apply ``set_quantizers`` to every module whose name matches one of ``names`` (regex)."""
    for name, mod in model.named_modules():
        if hasattr(mod, '_input_quantizer') or hasattr(mod, '_weight_quantizer'):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith('_quantizer'):
            for n in names:
                if re.search(n, name):
                    s = f'Warning: changing {name:{name_width}}'
                    for k, v in kwargs.items():
                        s += f' {k}={v}'
                        setattr(mod, k, v)
                    logger.info(s)
47
1
"""Lazy-import init for the ViViT model package."""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

# Maps submodule name -> names it exports; consumed by _LazyModule below.
_import_structure = {
    'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['image_processing_vivit'] = ['VivitImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_vivit'] = [
        'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'VivitModel',
        'VivitPreTrainedModel',
        'VivitForVideoClassification',
    ]

if TYPE_CHECKING:
    # Static type checkers get the real imports.
    from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_vivit import VivitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vivit import (
            VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            VivitForVideoClassification,
            VivitModel,
            VivitPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy submodules
    # are only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
47
"""In-place slowsort — a deliberately inefficient recursive sorting algorithm."""
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort ``sequence[start:end + 1]`` in place in ascending order.

    Args:
        sequence: mutable sequence to sort.
        start: first index of the slice (defaults to 0).
        end: last index of the slice, inclusive (defaults to ``len(sequence) - 1``).
    """
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    # Empty or single-element slice: nothing to do.
    if start >= end:
        return

    mid = (start + end) // 2

    # Recursively sort both halves, ...
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    # ... move the larger of the two "maximums" to the end, ...
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    # ... then slowsort everything except that final maximum.
    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
47
1
"""Speech2Text model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/s2t-small-librispeech-asr': (
        'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}


class Speech2TextConfig(PretrainedConfig):
    """Configuration for a Speech2Text encoder-decoder model.

    Defaults reproduce ``facebook/s2t-small-librispeech-asr``.
    """

    model_type = 'speech_to_text'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(
        self,
        vocab_size=10_000,
        encoder_layers=12,
        encoder_ffn_dim=2_048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2_048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function='relu',
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6_000,
        max_target_positions=1_024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1_024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        # One kernel size must be given per convolutional subsampling layer.
        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                'Configuration for convolutional module is incorrect. '
                'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
                f'but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, '
                f'`config.num_conv_layers = {self.num_conv_layers}`.'
            )

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
47
"""Slow integration test for the Flax MT5 conditional-generation model."""
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow

if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
    from transformers.models.ta.modeling_flax_ta import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase(unittest.TestCase):
    @slow
    def UpperCAmelCase_(self) -> int:
        """Regression check: sequence score of google/mt5-small on a fixed input/label pair."""
        model = FlaxMTaForConditionalGeneration.from_pretrained('google/mt5-small')
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small')

        encoder_input_ids = tokenizer('Hello there', return_tensors='np').input_ids
        label_ids = tokenizer('Hi I am', return_tensors='np').input_ids

        # Teacher forcing: decoder inputs are the labels shifted one position to the right.
        decoder_input_ids = shift_tokens_right(label_ids, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(encoder_input_ids, decoder_input_ids=decoder_input_ids).logits
        mean_loss = optax.softmax_cross_entropy(logits, onehot(label_ids, logits.shape[-1])).mean()

        # Convert the mean per-token loss into a total sequence log-likelihood.
        mtf_score = -(label_ids.shape[-1] * mean_loss.item())

        EXPECTED_SCORE = -84.9_127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
47
1
"""Recursive Tower of Hanoi solver that prints each move."""


def move_tower(height, from_pole, to_pole, with_pole):
    """Move ``height`` disks from ``from_pole`` to ``to_pole`` using ``with_pole`` as scratch.

    Prints one line per single-disk move.
    """
    if height >= 1:
        # Move the top height-1 disks out of the way, ...
        move_tower(height - 1, from_pole, with_pole, to_pole)
        # ... move the largest disk to its destination, ...
        move_disk(from_pole, to_pole)
        # ... then move the smaller disks on top of it.
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp, tp):
    """Print a single disk move from pole ``fp`` to pole ``tp``."""
    print('moving disk from', fp, 'to', tp)


def main():
    """Read the tower height from stdin and solve A -> B using C as scratch."""
    height = int(input('Height of hanoi: ').strip())
    move_tower(height, 'A', 'B', 'C')


if __name__ == "__main__":
    main()
47
"""CPU-only test: an optimizer wrapped by Accelerate must remain picklable."""
import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        """Round-trip an accelerator-prepared optimizer through pickle."""
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        # Prepare (and pickle) the optimizer itself — prepare() wraps it in
        # AcceleratedOptimizer, which must support pickling.
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        # Reset global singleton state so other tests start clean.
        AcceleratorState._reset_state()
47
1
"""Unit tests for the generation stopping-criteria utilities."""
import time
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers.generation import (
        MaxLengthCriteria,
        MaxNewTokensCriteria,
        MaxTimeCriteria,
        StoppingCriteriaList,
        validate_stopping_criteria,
    )


@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        """Build dummy (input_ids, scores) tensors of the given sequence length."""
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        # The wrapper list exposes the effective max_length (start + new tokens).
        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        # Backdate the start timestamp so the time budget is already exceeded.
        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        # A mismatching max_length should warn, not fail.
        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        # Validation inserts a MaxLengthCriteria when none is present.
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
47
"""Check whether three 3-D points are collinear via the cross product AB x AC."""

Vectorad = tuple[float, float, float]  # a 3-D vector (x, y, z)
Pointad = tuple[float, float, float]  # a 3-D point (x, y, z)


def create_vector(end_point1, end_point2):
    """Return the vector from *end_point1* to *end_point2*."""
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab, ac):
    """Return the cross product ab x ac."""
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector, accuracy):
    """True when every component rounds to zero at *accuracy* decimal places."""
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a, b, c, accuracy=10):
    """Return True when points *a*, *b*, *c* lie on one straight line.

    Collinearity holds iff the cross product of AB and AC is the zero
    vector; *accuracy* controls the rounding tolerance for the zero test.
    """
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
47
1
"""Fast (Rust-backed) tokenizer for RemBERT."""
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 2_5_6,
}

SPIECE_UNDERLINE = "▁"


class RemBertTokenizerFast(PreTrainedTokenizerFast):
    """A "fast" RemBERT tokenizer backed by HuggingFace *tokenizers*.

    Based on SentencePiece/Unigram; mirrors the slow ``RemBertTokenizer``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # The mask token behaves like a normal word: include preceding space.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # Saving the slow vocabulary requires the original sentencepiece file.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs: ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Token-type ids: 0 for the first segment (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Copy the sentencepiece model file into *save_directory*."""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
47
"""Prune and mask attention heads of a GPT-2 LM by gradient-based importance scores."""
import argparse
import logging
import os
from datetime import datetime

import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm

from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)


def save_model(model, dirpath):
    """Save *model* into *dirpath*, removing stale config/weights files first."""
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)


def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution along the last dim.

    With ``unlogit=True`` the input is squared first (treating it as an
    unnormalized attention map). 0*log(0) terms are zeroed explicitly.
    """
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)


def print_ad_tensor(tensor):
    """Log a 2D tensor row by row (layers x heads)."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))


def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Run the dataset through the model and gather per-head statistics.

    Returns (attn_entropy, head_importance, total_loss). Head importance is
    the accumulated |gradient| of the loss w.r.t. the head mask, normalized
    per layer and globally unless disabled via args.
    """
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If attention heads were actually pruned, pass no mask to avoid a shape mismatch.
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Forward pass WITHOUT torch.no_grad(): gradients feed the importance score.
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization (L2 per layer)
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_ad_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_ad_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_ad_tensor(head_ranks)

    return attn_entropy, head_importance, total_loss


def mask_heads(args, model, eval_dataloader):
    """Iteratively mask the least-important heads while the score stays above threshold."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_ad_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask


def prune_heads(args, model, eval_dataloader, head_mask):
    """Actually remove the masked heads and compare score/params/speed before vs. after."""
    # Measure with masking only
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    # Measure after real pruning (no mask needed any more)
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)


def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
47
1