code
stringlengths
82
54.1k
code_codestyle
int64
0
699
style_context
stringlengths
111
35.6k
style_context_codestyle
int64
0
699
label
int64
0
1
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { '''microsoft/unispeech-large-1500h-cv''': ( '''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json''' ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech } class lowerCAmelCase_ ( a__ ): UpperCAmelCase__ : List[str] = "unispeech" def __init__( self, SCREAMING_SNAKE_CASE_=32, SCREAMING_SNAKE_CASE_=768, SCREAMING_SNAKE_CASE_=12, SCREAMING_SNAKE_CASE_=12, SCREAMING_SNAKE_CASE_=3072, SCREAMING_SNAKE_CASE_="gelu", SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.02, SCREAMING_SNAKE_CASE_=1e-5, SCREAMING_SNAKE_CASE_="group", SCREAMING_SNAKE_CASE_="gelu", SCREAMING_SNAKE_CASE_=(512, 512, 512, 512, 512, 512, 512), SCREAMING_SNAKE_CASE_=(5, 2, 2, 2, 2, 2, 2), SCREAMING_SNAKE_CASE_=(10, 3, 3, 3, 3, 2, 2), SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=128, SCREAMING_SNAKE_CASE_=16, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=0.05, SCREAMING_SNAKE_CASE_=10, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=10, SCREAMING_SNAKE_CASE_=0, SCREAMING_SNAKE_CASE_=320, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=100, SCREAMING_SNAKE_CASE_=256, SCREAMING_SNAKE_CASE_=256, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_="mean", SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=256, SCREAMING_SNAKE_CASE_=80, SCREAMING_SNAKE_CASE_=0, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=0.5, **SCREAMING_SNAKE_CASE_, ) -> List[str]: super().__init__(**SCREAMING_SNAKE_CASE_, pad_token_id=SCREAMING_SNAKE_CASE_, bos_token_id=SCREAMING_SNAKE_CASE_, 
eos_token_id=SCREAMING_SNAKE_CASE_ ) UpperCamelCase : int = hidden_size UpperCamelCase : Dict = feat_extract_norm UpperCamelCase : Any = feat_extract_activation UpperCamelCase : str = list(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Any = list(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : int = list(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Tuple = conv_bias UpperCamelCase : Union[str, Any] = num_conv_pos_embeddings UpperCamelCase : Union[str, Any] = num_conv_pos_embedding_groups UpperCamelCase : int = len(self.conv_dim ) UpperCamelCase : str = num_hidden_layers UpperCamelCase : List[Any] = intermediate_size UpperCamelCase : List[str] = hidden_act UpperCamelCase : str = num_attention_heads UpperCamelCase : Dict = hidden_dropout UpperCamelCase : Any = attention_dropout UpperCamelCase : List[Any] = activation_dropout UpperCamelCase : Any = feat_proj_dropout UpperCamelCase : Tuple = final_dropout UpperCamelCase : Optional[Any] = layerdrop UpperCamelCase : Tuple = layer_norm_eps UpperCamelCase : Tuple = initializer_range UpperCamelCase : Optional[Any] = num_ctc_classes UpperCamelCase : Any = vocab_size UpperCamelCase : int = do_stable_layer_norm UpperCamelCase : int = use_weighted_layer_sum UpperCamelCase : Tuple = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==' ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =' F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,""" F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 UpperCamelCase : str = apply_spec_augment UpperCamelCase : Optional[int] = mask_time_prob UpperCamelCase : Any = mask_time_length UpperCamelCase : Optional[Any] = mask_time_min_masks UpperCamelCase : int = mask_feature_prob UpperCamelCase : Union[str, Any] = mask_feature_length UpperCamelCase : Optional[Any] = mask_feature_min_masks # parameters for pretraining with codevector quantized representations UpperCamelCase : int = num_codevectors_per_group UpperCamelCase : str = num_codevector_groups UpperCamelCase : Optional[Any] = contrastive_logits_temperature UpperCamelCase : Optional[Any] = feat_quantizer_dropout UpperCamelCase : Union[str, Any] = num_negatives UpperCamelCase : Dict = codevector_dim UpperCamelCase : Tuple = proj_codevector_dim UpperCamelCase : int = diversity_loss_weight # ctc loss UpperCamelCase : int = ctc_loss_reduction UpperCamelCase : str = ctc_zero_infinity # pretraining loss UpperCamelCase : Dict = replace_prob @property def snake_case_ ( self ) -> List[str]: return functools.reduce(operator.mul, self.conv_stride, 1 )
40
"""simple docstring""" import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() __A : List[Any] = logging.get_logger(__name__) __A : Any = [ ("bert.bert", "visual_bert"), ("bert.cls", "cls"), ("bert.classifier", "cls"), ("token_type_embeddings_visual", "visual_token_type_embeddings"), ("position_embeddings_visual", "visual_position_embeddings"), ("projection", "visual_projection"), ] __A : Optional[int] = [ "nlvr2_coco_pre_trained.th", "nlvr2_fine_tuned.th", "nlvr2_pre_trained.th", "vcr_coco_pre_train.th", "vcr_fine_tune.th", "vcr_pre_train.th", "vqa_coco_pre_trained.th", "vqa_fine_tuned.th", "vqa_pre_trained.th", ] def lowercase ( UpperCamelCase : Tuple ): """simple docstring""" A__ : Union[str, Any] =torch.load(UpperCamelCase , map_location="cpu" ) return sd def lowercase ( UpperCamelCase : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : int=rename_keys_prefix ): """simple docstring""" A__ : List[str] =OrderedDict() A__ : str =torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue A__ : Optional[Any] =key for name_pair in rename_keys_prefix: A__ : int =new_key.replace(name_pair[0] , name_pair[1] ) A__ : Dict =d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately A__ : Optional[int] =new_d["cls.predictions.bias"] return new_d @torch.no_grad() def lowercase ( UpperCamelCase : Dict , UpperCamelCase : List[str] ): """simple docstring""" assert ( checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS ), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.''' # Get Config if "pre" in 
checkpoint_path: A__ : Any ="pretraining" if "vcr" in checkpoint_path: A__ : Union[str, Any] ={"visual_embedding_dim": 512} elif "vqa_advanced" in checkpoint_path: A__ : Optional[Any] ={"visual_embedding_dim": 2048} elif "vqa" in checkpoint_path: A__ : Optional[int] ={"visual_embedding_dim": 2048} elif "nlvr" in checkpoint_path: A__ : List[str] ={"visual_embedding_dim": 1024} else: raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''' ) else: if "vcr" in checkpoint_path: A__ : Optional[int] ={"visual_embedding_dim": 512} A__ : List[str] ="multichoice" elif "vqa_advanced" in checkpoint_path: A__ : Any ={"visual_embedding_dim": 2048} A__ : str ="vqa_advanced" elif "vqa" in checkpoint_path: A__ : Optional[int] ={"visual_embedding_dim": 2048, "num_labels": 3129} A__ : str ="vqa" elif "nlvr" in checkpoint_path: A__ : str ={ "visual_embedding_dim": 1024, "num_labels": 2, } A__ : Dict ="nlvr" A__ : Union[str, Any] =VisualBertConfig(**UpperCamelCase ) # Load State Dict A__ : int =load_state_dict(UpperCamelCase ) A__ : Tuple =get_new_dict(UpperCamelCase , UpperCamelCase ) if model_type == "pretraining": A__ : str =VisualBertForPreTraining(UpperCamelCase ) elif model_type == "vqa": A__ : Optional[int] =VisualBertForQuestionAnswering(UpperCamelCase ) elif model_type == "nlvr": A__ : Union[str, Any] =VisualBertForVisualReasoning(UpperCamelCase ) elif model_type == "multichoice": A__ : Union[str, Any] =VisualBertForMultipleChoice(UpperCamelCase ) model.load_state_dict(UpperCamelCase ) # Save Checkpoints Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase ) model.save_pretrained(UpperCamelCase ) if __name__ == "__main__": __A : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.") parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.") __A : str = parser.parse_args() 
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
656
0
'''simple docstring''' from PIL import Image def _A ( A__ , A__ ): """simple docstring""" __lowercase = (259 * (level + 255)) / (255 * (259 - level)) def contrast(A__ ) -> int: return int(128 + factor * (c - 128) ) return img.point(A__ ) if __name__ == "__main__": # Load image with Image.open('''image_data/lena.jpg''') as img: # Change contrast to 170 lowerCAmelCase__ = change_contrast(img, 170) cont_img.save('''image_data/lena_high_contrast.png''', format='''png''')
41
"""simple docstring""" __A : Union[str, Any] = {str(digit): digit**5 for digit in range(10)} def lowercase ( UpperCamelCase : int ): """simple docstring""" return sum(DIGITS_FIFTH_POWER[digit] for digit in str(UpperCamelCase ) ) def lowercase ( ): """simple docstring""" return sum( number for number in range(1000 , 1000000 ) if number == digits_fifth_powers_sum(UpperCamelCase ) ) if __name__ == "__main__": print(solution())
656
0
'''simple docstring''' import argparse import datetime def _UpperCamelCase ( __UpperCamelCase ) -> str: lowerCamelCase_ = { '0': 'Sunday', '1': 'Monday', '2': 'Tuesday', '3': 'Wednesday', '4': 'Thursday', '5': 'Friday', '6': 'Saturday', } lowerCamelCase_ = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0} # Validate if not 0 < len(__UpperCamelCase ) < 11: raise ValueError('Must be 10 characters long' ) # Get month lowerCamelCase_ = int(date_input[0] + date_input[1] ) # Validate if not 0 < m < 13: raise ValueError('Month must be between 1 - 12' ) lowerCamelCase_ = date_input[2] # Validate if sep_a not in ["-", "/"]: raise ValueError('Date separator must be \'-\' or \'/\'' ) # Get day lowerCamelCase_ = int(date_input[3] + date_input[4] ) # Validate if not 0 < d < 32: raise ValueError('Date must be between 1 - 31' ) # Get second separator lowerCamelCase_ = date_input[5] # Validate if sep_a not in ["-", "/"]: raise ValueError('Date separator must be \'-\' or \'/\'' ) # Get year lowerCamelCase_ = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] ) # Arbitrary year range if not 45 < y < 85_00: raise ValueError( 'Year out of range. There has to be some sort of limit...right?' ) # Get datetime obj for validation lowerCamelCase_ = datetime.date(int(__UpperCamelCase ) ,int(__UpperCamelCase ) ,int(__UpperCamelCase ) ) # Start math if m <= 2: lowerCamelCase_ = y - 1 lowerCamelCase_ = m + 12 # maths var lowerCamelCase_ = int(str(__UpperCamelCase )[:2] ) lowerCamelCase_ = int(str(__UpperCamelCase )[2:] ) lowerCamelCase_ = int(2.6 * m - 5.39 ) lowerCamelCase_ = int(c / 4 ) lowerCamelCase_ = int(k / 4 ) lowerCamelCase_ = int(d + k ) lowerCamelCase_ = int(t + u + v + x ) lowerCamelCase_ = int(z - (2 * c) ) lowerCamelCase_ = round(w % 7 ) # End math # Validate math if f != convert_datetime_days[dt_ck.weekday()]: raise AssertionError('The date was evaluated incorrectly. Contact developer.' 
) # Response lowerCamelCase_ = f'''Your date {date_input}, is a {days[str(__UpperCamelCase )]}!''' return response if __name__ == "__main__": import doctest doctest.testmod() A_ = argparse.ArgumentParser( description=( "Find out what day of the week nearly any date is or was. Enter " "date as a string in the mm-dd-yyyy or mm/dd/yyyy format" ) ) parser.add_argument( "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)" ) A_ = parser.parse_args() zeller(args.date_input)
42
"""simple docstring""" import collections.abc from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_poolformer import PoolFormerConfig __A : Optional[Any] = logging.get_logger(__name__) # General docstring __A : str = "PoolFormerConfig" # Base docstring __A : Optional[Any] = "sail/poolformer_s12" __A : List[Any] = [1, 512, 7, 7] # Image classification docstring __A : List[str] = "sail/poolformer_s12" __A : Tuple = "tabby, tabby cat" __A : Tuple = [ "sail/poolformer_s12", # See all PoolFormer models at https://huggingface.co/models?filter=poolformer ] def lowercase ( UpperCamelCase : Any , UpperCamelCase : float = 0.0 , UpperCamelCase : bool = False ): """simple docstring""" if drop_prob == 0.0 or not training: return input A__ : Tuple =1 - drop_prob A__ : List[str] =(input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets A__ : Any =keep_prob + torch.rand(UpperCamelCase , dtype=input.dtype , device=input.device ) random_tensor.floor_() # binarize A__ : Optional[int] =input.div(UpperCamelCase ) * random_tensor return output class __lowerCAmelCase ( nn.Module): '''simple docstring''' def __init__( self : Optional[int] , UpperCamelCase__ : Optional[float] = None ): super().__init__() A__ : Optional[int] =drop_prob def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : torch.Tensor ): return drop_path(UpperCamelCase__ , self.drop_prob , self.training ) def _UpperCAmelCase ( self : List[str] ): return "p={}".format(self.drop_prob ) class __lowerCAmelCase ( nn.Module): '''simple docstring''' def 
__init__( self : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int=None ): super().__init__() A__ : Optional[int] =patch_size if isinstance(UpperCamelCase__ , collections.abc.Iterable ) else (patch_size, patch_size) A__ : Optional[int] =stride if isinstance(UpperCamelCase__ , collections.abc.Iterable ) else (stride, stride) A__ : int =padding if isinstance(UpperCamelCase__ , collections.abc.Iterable ) else (padding, padding) A__ : Any =nn.Convad(UpperCamelCase__ , UpperCamelCase__ , kernel_size=UpperCamelCase__ , stride=UpperCamelCase__ , padding=UpperCamelCase__ ) A__ : Any =norm_layer(UpperCamelCase__ ) if norm_layer else nn.Identity() def _UpperCAmelCase ( self : Tuple , UpperCamelCase__ : str ): A__ : List[str] =self.projection(UpperCamelCase__ ) A__ : Any =self.norm(UpperCamelCase__ ) return embeddings class __lowerCAmelCase ( nn.GroupNorm): '''simple docstring''' def __init__( self : Tuple , UpperCamelCase__ : Dict , **UpperCamelCase__ : Union[str, Any] ): super().__init__(1 , UpperCamelCase__ , **UpperCamelCase__ ) class __lowerCAmelCase ( nn.Module): '''simple docstring''' def __init__( self : Tuple , UpperCamelCase__ : Optional[int] ): super().__init__() A__ : Any =nn.AvgPoolad(UpperCamelCase__ , stride=1 , padding=pool_size // 2 , count_include_pad=UpperCamelCase__ ) def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : List[str] ): return self.pool(UpperCamelCase__ ) - hidden_states class __lowerCAmelCase ( nn.Module): '''simple docstring''' def __init__( self : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] ): super().__init__() A__ : List[Any] =nn.Convad(UpperCamelCase__ , UpperCamelCase__ , 1 ) A__ : Union[str, Any] =nn.Convad(UpperCamelCase__ , UpperCamelCase__ , 1 ) A__ : Dict =PoolFormerDropPath(UpperCamelCase__ ) if 
isinstance(config.hidden_act , UpperCamelCase__ ): A__ : Tuple =ACTaFN[config.hidden_act] else: A__ : Optional[Any] =config.hidden_act def _UpperCAmelCase ( self : Tuple , UpperCamelCase__ : Dict ): A__ : Optional[Any] =self.conva(UpperCamelCase__ ) A__ : List[str] =self.act_fn(UpperCamelCase__ ) A__ : List[str] =self.drop(UpperCamelCase__ ) A__ : Optional[int] =self.conva(UpperCamelCase__ ) A__ : Optional[Any] =self.drop(UpperCamelCase__ ) return hidden_states class __lowerCAmelCase ( nn.Module): '''simple docstring''' def __init__( self : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Any ): super().__init__() A__ : Optional[int] =PoolFormerPooling(UpperCamelCase__ ) A__ : List[str] =PoolFormerOutput(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) A__ : int =PoolFormerGroupNorm(UpperCamelCase__ ) A__ : int =PoolFormerGroupNorm(UpperCamelCase__ ) # Useful for training neural nets A__ : Tuple =PoolFormerDropPath(UpperCamelCase__ ) if drop_path > 0.0 else nn.Identity() A__ : Optional[Any] =config.use_layer_scale if config.use_layer_scale: A__ : List[str] =nn.Parameter( config.layer_scale_init_value * torch.ones((UpperCamelCase__) ) , requires_grad=UpperCamelCase__ ) A__ : List[Any] =nn.Parameter( config.layer_scale_init_value * torch.ones((UpperCamelCase__) ) , requires_grad=UpperCamelCase__ ) def _UpperCAmelCase ( self : Any , UpperCamelCase__ : Optional[int] ): if self.use_layer_scale: A__ : Optional[int] =self.pooling(self.before_norm(UpperCamelCase__ ) ) A__ : Union[str, Any] =self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output # First residual connection A__ : Union[str, Any] =hidden_states + self.drop_path(UpperCamelCase__ ) A__ : Tuple =() A__ : List[str] =self.output(self.after_norm(UpperCamelCase__ ) ) A__ : Optional[Any] =self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * 
layer_output # Second residual connection A__ : str =hidden_states + self.drop_path(UpperCamelCase__ ) A__ : List[Any] =(output,) + outputs return outputs else: A__ : Tuple =self.drop_path(self.pooling(self.before_norm(UpperCamelCase__ ) ) ) # First residual connection A__ : Optional[Any] =pooling_output + hidden_states A__ : Tuple =() # Second residual connection inside the PoolFormerOutput block A__ : List[str] =self.drop_path(self.output(self.after_norm(UpperCamelCase__ ) ) ) A__ : Any =hidden_states + layer_output A__ : Tuple =(output,) + outputs return outputs class __lowerCAmelCase ( nn.Module): '''simple docstring''' def __init__( self : Dict , UpperCamelCase__ : List[str] ): super().__init__() A__ : Tuple =config # stochastic depth decay rule A__ : Dict =[x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )] # patch embeddings A__ : Tuple =[] for i in range(config.num_encoder_blocks ): embeddings.append( PoolFormerEmbeddings( patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) ) A__ : List[str] =nn.ModuleList(UpperCamelCase__ ) # Transformer blocks A__ : Union[str, Any] =[] A__ : Any =0 for i in range(config.num_encoder_blocks ): # each block consists of layers A__ : Union[str, Any] =[] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i] ): layers.append( PoolFormerLayer( UpperCamelCase__ , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) ) blocks.append(nn.ModuleList(UpperCamelCase__ ) ) A__ : str =nn.ModuleList(UpperCamelCase__ ) def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : Optional[int]=True ): A__ : Union[str, Any] =() if 
output_hidden_states else None A__ : Dict =pixel_values for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ): A__ , A__ : List[Any] =layers # Get patch embeddings from hidden_states A__ : Any =embedding_layer(UpperCamelCase__ ) # Send the embeddings through the blocks for _, blk in enumerate(UpperCamelCase__ ): A__ : List[str] =blk(UpperCamelCase__ ) A__ : Tuple =layer_outputs[0] if output_hidden_states: A__ : List[Any] =all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=UpperCamelCase__ , hidden_states=UpperCamelCase__ ) class __lowerCAmelCase ( _UpperCamelCase): '''simple docstring''' __magic_name__ : List[str] = PoolFormerConfig __magic_name__ : int = """poolformer""" __magic_name__ : Any = """pixel_values""" __magic_name__ : Any = True def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : str ): if isinstance(UpperCamelCase__ , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(UpperCamelCase__ , nn.LayerNorm ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) def _UpperCAmelCase ( self : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any]=False ): if isinstance(UpperCamelCase__ , UpperCamelCase__ ): A__ : Optional[Any] =value __A : Optional[int] = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. 
Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n" __A : Dict = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n" @add_start_docstrings( """The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.""" , _UpperCamelCase , ) class __lowerCAmelCase ( _UpperCamelCase): '''simple docstring''' def __init__( self : List[str] , UpperCamelCase__ : Dict ): super().__init__(UpperCamelCase__ ) A__ : List[Any] =config A__ : Optional[Any] =PoolFormerEncoder(UpperCamelCase__ ) # Initialize weights and apply final processing self.post_init() def _UpperCAmelCase ( self : Tuple ): return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(UpperCamelCase__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCamelCase__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def _UpperCAmelCase ( self : str , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[bool] = None , ): A__ : int =( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) A__ : Optional[int] =return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values" ) A__ : List[Any] =self.encoder( UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , return_dict=UpperCamelCase__ , ) A__ : int =encoder_outputs[0] if not return_dict: return (sequence_output, None) + encoder_outputs[1:] return BaseModelOutputWithNoAttention( last_hidden_state=UpperCamelCase__ , hidden_states=encoder_outputs.hidden_states , ) class __lowerCAmelCase ( nn.Module): '''simple docstring''' def 
__init__( self : Dict , UpperCamelCase__ : Optional[Any] ): super().__init__() A__ : List[str] =nn.Linear(config.hidden_size , config.hidden_size ) def _UpperCAmelCase ( self : Optional[Any] , UpperCamelCase__ : List[Any] ): A__ : int =self.dense(UpperCamelCase__ ) return output @add_start_docstrings( """ PoolFormer Model transformer with an image classification head on top """ , _UpperCamelCase , ) class __lowerCAmelCase ( _UpperCamelCase): '''simple docstring''' def __init__( self : Optional[Any] , UpperCamelCase__ : str ): super().__init__(UpperCamelCase__ ) A__ : List[str] =config.num_labels A__ : Optional[int] =PoolFormerModel(UpperCamelCase__ ) # Final norm A__ : Dict =PoolFormerGroupNorm(config.hidden_sizes[-1] ) # Classifier head A__ : Dict =( nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UpperCamelCase__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCamelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.LongTensor] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[bool] = None , ): A__ : Tuple =return_dict if return_dict is not None else self.config.use_return_dict A__ : List[str] =self.poolformer( UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , return_dict=UpperCamelCase__ , ) A__ : str =outputs[0] A__ : List[Any] =self.classifier(self.norm(UpperCamelCase__ ).mean([-2, -1] ) ) A__ : Optional[Any] =None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: A__ : int ="regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): A__ : Tuple ="single_label_classification" else: A__ : 
Optional[int] ="multi_label_classification" if self.config.problem_type == "regression": A__ : Dict =MSELoss() if self.num_labels == 1: A__ : Optional[Any] =loss_fct(logits.squeeze() , labels.squeeze() ) else: A__ : List[str] =loss_fct(UpperCamelCase__ , UpperCamelCase__ ) elif self.config.problem_type == "single_label_classification": A__ : Tuple =CrossEntropyLoss() A__ : int =loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": A__ : List[Any] =BCEWithLogitsLoss() A__ : str =loss_fct(UpperCamelCase__ , UpperCamelCase__ ) if not return_dict: A__ : Optional[int] =(logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=UpperCamelCase__ , logits=UpperCamelCase__ , hidden_states=outputs.hidden_states )
656
0
def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''' ) if len(SCREAMING_SNAKE_CASE ) == 0: raise ValueError('''Input list must be a non empty list''' ) if len(SCREAMING_SNAKE_CASE ) == 1: return True lowercase__ = series[1] - series[0] for index in range(len(SCREAMING_SNAKE_CASE ) - 1 ): if series[index + 1] - series[index] != common_diff: return False return True def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''' ) if len(SCREAMING_SNAKE_CASE ) == 0: raise ValueError('''Input list must be a non empty list''' ) lowercase__ = 0 for val in series: answer += val return answer / len(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod()
43
"""simple docstring""" import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase): '''simple docstring''' __magic_name__ : int = IFInpaintingSuperResolutionPipeline __magic_name__ : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""} __magic_name__ : str = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""}) __magic_name__ : Optional[Any] = PipelineTesterMixin.required_optional_params - {"""latents"""} def _UpperCAmelCase ( self : Union[str, Any] ): return self._get_superresolution_dummy_components() def _UpperCAmelCase ( self : Optional[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int]=0 ): if str(UpperCamelCase__ ).startswith("mps" ): A__ : Any =torch.manual_seed(UpperCamelCase__ ) else: A__ : Dict =torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ ) A__ : Tuple =floats_tensor((1, 3, 16, 16) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) A__ : Optional[int] =floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) A__ : Any =floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) A__ : List[str] ={ "prompt": "A painting of a squirrel eating a burger", "image": image, "original_image": original_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , 
reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def _UpperCAmelCase ( self : Dict ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def _UpperCAmelCase ( self : int ): self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" ) def _UpperCAmelCase ( self : Tuple ): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def _UpperCAmelCase ( self : str ): self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def _UpperCAmelCase ( self : Dict ): self._test_save_load_local() def _UpperCAmelCase ( self : Optional[int] ): self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
656
0
'''simple docstring''' import functools def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : list[int] ): """simple docstring""" if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or not all(isinstance(_lowerCAmelCase , _lowerCAmelCase ) for day in days ): raise ValueError("The parameter days should be a list of integers" ) if len(_lowerCAmelCase ) != 3 or not all(isinstance(_lowerCAmelCase , _lowerCAmelCase ) for cost in costs ): raise ValueError("The parameter costs should be a list of three integers" ) if len(_lowerCAmelCase ) == 0: return 0 if min(_lowerCAmelCase ) <= 0: raise ValueError("All days elements should be greater than 0" ) if max(_lowerCAmelCase ) >= 366: raise ValueError("All days elements should be less than 366" ) _lowerCamelCase : Union[str, Any] = set(_lowerCAmelCase ) @functools.cache def dynamic_programming(_lowerCAmelCase : int ) -> int: if index > 365: return 0 if index not in days_set: return dynamic_programming(index + 1 ) return min( costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , ) return dynamic_programming(1 ) if __name__ == "__main__": import doctest doctest.testmod()
44
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __A : Any = { "configuration_efficientformer": [ "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "EfficientFormerConfig", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Union[str, Any] = ["EfficientFormerImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Optional[int] = [ "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "EfficientFormerForImageClassification", "EfficientFormerForImageClassificationWithTeacher", "EfficientFormerModel", "EfficientFormerPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Optional[int] = [ "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFEfficientFormerForImageClassification", "TFEfficientFormerForImageClassificationWithTeacher", "TFEfficientFormerModel", "TFEfficientFormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientformer import EfficientFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientformer import ( EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientFormerForImageClassification, EfficientFormerForImageClassificationWithTeacher, EfficientFormerModel, EfficientFormerPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass 
else: from .modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, TFEfficientFormerPreTrainedModel, ) else: import sys __A : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
656
0
"""Solve the drift-conductivity relation sigma = q * n * mu for the one
quantity supplied as 0, given the other two."""
from __future__ import annotations

# Elementary charge.  units = C
ELECTRON_CHARGE = 1.6021e-19
# Backward-compatible alias: the previous revision exposed the constant under
# this (auto-mangled) name while the function body read ELECTRON_CHARGE,
# which was undefined -- every call raised NameError.
UpperCamelCase = ELECTRON_CHARGE


def A(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
    """Return ``(name_of_missing_quantity, value)``.

    Exactly one of the three arguments must be 0 (the unknown); the other
    two must be positive.

    Raises:
        ValueError: if not exactly one argument is 0, or any argument is
            negative.
    """
    if (conductivity, electron_conc, mobility).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif conductivity < 0:
        raise ValueError("Conductivity cannot be negative")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative")
    elif mobility < 0:
        raise ValueError("mobility cannot be negative")
    elif conductivity == 0:
        # sigma = q * n * mu
        return (
            "conductivity",
            mobility * electron_conc * ELECTRON_CHARGE,
        )
    elif electron_conc == 0:
        # n = sigma / (q * mu)
        return (
            "electron_conc",
            conductivity / (mobility * ELECTRON_CHARGE),
        )
    else:
        # mu = sigma / (q * n)
        return (
            "mobility",
            conductivity / (electron_conc * ELECTRON_CHARGE),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
45
"""Tests for the optimizers and learning-rate schedulers in `transformers.optimization`.

NOTE(review): identifiers in this module appear machine-mangled -- functions
named `lowercase`, classes named `__lowerCAmelCase`, methods named
`_UpperCAmelCase`, and parameters duplicated as `UpperCamelCase` -- while the
call sites still use the original names (`unwrap_schedule`,
`unwrap_and_save_reload_schedule`, `self.assertListAlmostEqual`,
`LambdaScheduleWrapper`, `scheduler`, `w`, `loss`, ...).  As written the module
cannot even compile (duplicate parameter names are a SyntaxError).  The
comments below describe the evident intent; the code itself is left unchanged.
"""
import os
import tempfile
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        Adafactor,
        AdamW,
        get_constant_schedule,
        get_constant_schedule_with_warmup,
        get_cosine_schedule_with_warmup,
        get_cosine_with_hard_restarts_schedule_with_warmup,
        get_inverse_sqrt_schedule,
        get_linear_schedule_with_warmup,
        get_polynomial_decay_schedule_with_warmup,
    )


# Intended: step a scheduler `num_steps` times, recording the LR before each
# step.  NOTE(review): both parameters share one name (SyntaxError) and the
# body reads `scheduler`/`lrs`, which are never bound here.
def lowercase(UpperCamelCase: Union[str, Any], UpperCamelCase: Union[str, Any] = 10):
    """Collect the learning rates a scheduler produces over `num_steps` steps."""
    A__: Tuple = []
    for _ in range(UpperCamelCase):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


# Intended: same as above, but save/reload the scheduler state_dict at the
# halfway point to verify serialization round-trips.  Same mangling damage.
def lowercase(UpperCamelCase: List[str], UpperCamelCase: Union[str, Any] = 10):
    """Collect learning rates while round-tripping scheduler state midway."""
    A__: Dict = []
    for step in range(UpperCamelCase):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                A__: List[Any] = os.path.join(UpperCamelCase, "schedule.bin")
                torch.save(scheduler.state_dict(), UpperCamelCase)
                A__: Dict = torch.load(UpperCamelCase)
                scheduler.load_state_dict(UpperCamelCase)
    return lrs


@require_torch
class __lowerCAmelCase(unittest.TestCase):
    """Optimizer convergence tests (AdamW, Adafactor) on a tiny regression problem."""

    # Intended name: assertListAlmostEqual -- the test methods below call it
    # under that name.  NOTE(review): the three parameters share one name and
    # the loop passes the parameter instead of `a`/`b`.
    def _UpperCAmelCase(self: Dict, UpperCamelCase__: List[Any], UpperCamelCase__: Optional[Any], UpperCamelCase__: int):
        self.assertEqual(len(UpperCamelCase__), len(UpperCamelCase__))
        for a, b in zip(UpperCamelCase__, UpperCamelCase__):
            self.assertAlmostEqual(UpperCamelCase__, UpperCamelCase__, delta=UpperCamelCase__)

    # Intended: fit w toward the target with AdamW; 100 steps should converge.
    def _UpperCAmelCase(self: Tuple):
        A__: Any = torch.tensor([0.1, -0.2, -0.1], requires_grad=UpperCamelCase__)
        A__: Optional[Any] = torch.tensor([0.4, 0.2, -0.5])
        A__: Any = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        A__: List[str] = AdamW(params=[w], lr=2E-1, weight_decay=0.0)
        for _ in range(100):
            A__: Optional[int] = criterion(UpperCamelCase__, UpperCamelCase__)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1E-2)

    # Intended: same fit with Adafactor (fixed LR, no relative step).
    def _UpperCAmelCase(self: Dict):
        A__: Optional[int] = torch.tensor([0.1, -0.2, -0.1], requires_grad=UpperCamelCase__)
        A__: Dict = torch.tensor([0.4, 0.2, -0.5])
        A__: Optional[int] = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        A__: int = Adafactor(
            params=[w],
            lr=1E-2,
            eps=(1E-30, 1E-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            betaa=UpperCamelCase__,
            weight_decay=0.0,
            relative_step=UpperCamelCase__,
            scale_parameter=UpperCamelCase__,
            warmup_init=UpperCamelCase__,
        )
        for _ in range(1000):
            A__: List[Any] = criterion(UpperCamelCase__, UpperCamelCase__)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1E-2)


@require_torch
class __lowerCAmelCase(unittest.TestCase):
    """LR-schedule shape tests.

    NOTE(review): shadows the class above (same mangled name); the class
    attributes below were presumably `m`, `optimizer`, `num_steps` -- the
    bodies read `self.optimizer` / `self.num_steps` and the second attribute
    reads `m`, none of which are bound here.
    """

    __magic_name__: Optional[int] = nn.Linear(50, 50) if is_torch_available() else None
    __magic_name__: Any = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    __magic_name__: Union[str, Any] = 10

    # Intended name: assertListAlmostEqual (with optional failure message).
    def _UpperCAmelCase(self: List[Any], UpperCamelCase__: Union[str, Any], UpperCamelCase__: List[str], UpperCamelCase__: Optional[Any], UpperCamelCase__: int = None):
        self.assertEqual(len(UpperCamelCase__), len(UpperCamelCase__))
        for a, b in zip(UpperCamelCase__, UpperCamelCase__):
            self.assertAlmostEqual(UpperCamelCase__, UpperCamelCase__, delta=UpperCamelCase__, msg=UpperCamelCase__)

    # Intended: for each scheduler factory, check the produced LR trajectory
    # against hard-coded expectations, then again after a save/reload.
    def _UpperCAmelCase(self: Optional[Any]):
        A__: Union[str, Any] = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers doct format
        # function: (sched_args_dict, expected_learning_rates)
        A__: Union[str, Any] = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1E-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }
        for scheduler_func, data in scheds.items():
            # NOTE(review): annotated tuple-unpacking below is a SyntaxError;
            # intended `kwargs, expected_learning_rates = data`.
            A__, A__: Any = data
            A__: Union[str, Any] = scheduler_func(self.optimizer, **UpperCamelCase__)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            A__: int = unwrap_schedule(UpperCamelCase__, self.num_steps)
            self.assertListAlmostEqual(
                UpperCamelCase__,
                UpperCamelCase__,
                tol=1E-2,
                msg=F'''failed for {scheduler_func} in normal scheduler''',
            )
            A__: List[str] = scheduler_func(self.optimizer, **UpperCamelCase__)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(UpperCamelCase__)  # wrap to test picklability of the schedule
            A__: Tuple = unwrap_and_save_reload_schedule(UpperCamelCase__, self.num_steps)
            self.assertListEqual(UpperCamelCase__, UpperCamelCase__, msg=F'''failed for {scheduler_func} in save and reload''')


class __lowerCAmelCase:
    """Picklable wrapper for schedule lambdas (intended name: LambdaScheduleWrapper)."""

    def __init__(self: int, UpperCamelCase__: str):
        # NOTE(review): `fn` is unbound here; intended `self.fn = fn`.
        A__: int = fn

    def __call__(self: List[Any], *UpperCamelCase__: Optional[Any], **UpperCamelCase__: List[Any]):
        # Delegate to the wrapped callable.
        return self.fn(*UpperCamelCase__, **UpperCamelCase__)

    @classmethod
    def _UpperCAmelCase(self: Dict, UpperCamelCase__: Dict):
        # NOTE(review): definition is truncated in the source at this point;
        # intended to rebind `scheduler.lr_lambdas` to the wrapped lambdas.
        A__: str = list(map(self, scheduler.lr_lambdas))
656
0
"""Helpers for reading typed values (int / flag / choice) from environment variables."""
import os


def strtobool(value):
    """Map a truthy/falsy string to 1 / 0.

    Local replacement for ``distutils.util.strtobool``: ``distutils`` was
    removed from the standard library in Python 3.12 (PEP 632).  Semantics are
    identical, including returning an int rather than a bool.

    Raises:
        ValueError: if `value` is not a recognized truth string.
    """
    value = value.lower()
    if value in ("y", "yes", "t", "true", "on", "1"):
        return 1
    if value in ("n", "no", "f", "false", "off", "0"):
        return 0
    raise ValueError(f"invalid truth value {value!r}")


# NOTE: the three definitions below deliberately keep the original shared name,
# so each one shadows the previous at module level (pre-existing behavior kept
# for interface compatibility); only the last is reachable via the module.
def lowerCamelCase_(env_keys, default):
    """Return the first non-negative int found among `env_keys`, else `default`.

    Previous revision had duplicated parameter names (a SyntaxError) and read
    undefined `env_keys`/`e`/`val`; restored here.
    """
    for e in env_keys:
        # -1 sentinel: treats "unset" and "negative" alike so we fall through.
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def lowerCamelCase_(key, default=False):  # noqa: F811 -- intentionally shadows the int variant
    """Interpret the environment variable `key` as a boolean flag."""
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def lowerCamelCase_(key, default="no"):  # noqa: F811 -- intentionally shadows the flag variant
    """Return the raw string value of `key`, defaulting to `default`."""
    value = os.environ.get(key, str(default))
    return value
46
"""Convert original fairseq SpeechT5 checkpoints (s2t / t2s / s2s) to the
Hugging Face Transformers format.

Fixes vs. previous revision: every function had been renamed to ``lowercase``
with all parameters named ``UpperCamelCase`` (duplicate parameter names are a
SyntaxError, and only the last same-named def would survive), and every
module-level mapping was bound to ``__A`` -- while all call sites still used
the original identifiers.  The names the call sites demand are restored; all
mapping keys/values and message strings are unchanged.
"""
import argparse

import torch

from transformers import (
    SpeechTaConfig,
    SpeechTaFeatureExtractor,
    SpeechTaForSpeechToSpeech,
    SpeechTaForSpeechToText,
    SpeechTaForTextToSpeech,
    SpeechTaProcessor,
    SpeechTaTokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken

logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")

# fairseq key -> HF key mappings, one dict per sub-network.
MAPPING_SPEECH_ENCODER_PRENET = {
    "speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
    "speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
    "speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
    "speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
MAPPING_TEXT_ENCODER_PRENET = {
    "text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
    "text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
MAPPING_SPEECH_DECODER_PRENET = {
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
    "speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
    "speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
    "speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
MAPPING_SPEECH_DECODER_POSTNET = {
    "speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
    "speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
    "speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
    "speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
    "speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
    "speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
    "speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
    "speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
    "speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
    "speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
    "speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
    "speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
MAPPING_TEXT_DECODER_PRENET = {
    "text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
MAPPING_TEXT_DECODER_POSTNET = {
    "text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
MAPPING_ENCODER = {
    "encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
    "encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
    "encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
    "encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
    "encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
    "encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
    "encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
    "encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
    "encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
MAPPING_DECODER = {
    "decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
    "decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
    "decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
    "decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
    "decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
    "decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
    "decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
    "decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
    "decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
    "decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
    "decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
    "decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
    "decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
# Per-task composite mappings.
MAPPING_S2T = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_TEXT_DECODER_PRENET,
    **MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
    **MAPPING_TEXT_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
    "encoder.version",
    "encoder.layers.*.norm_k.weight",
    "encoder.layers.*.norm_k.bias",
    "decoder.version",
    "decoder.layers.*.norm_k.weight",
    "decoder.layers.*.norm_k.bias",
    "decoder.pos_emb.pe_k",
    "speech_encoder_prenet.embed_positions._float_tensor",
    "text_decoder_prenet.embed_positions._float_tensor",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "speech_decoder_prenet.*",
    "speech_decoder_postnet.*",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
    "encoder.proj",
    "speech_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy `value` into the attribute of `hf_pointer` addressed by dotted `key`.

    `weight_type` selects which leaf tensor (weight / bias / norm stats / ...)
    receives the data; `full_name` is only used for error/log messages.

    Raises:
        ValueError: if the target tensor's shape does not match `value`.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}'''
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(F'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''')


def should_ignore(name, ignore_keys):
    """Return True if fairseq key `name` matches any ignore pattern.

    Patterns ending in ``.*`` match as a prefix; patterns containing ``.*.``
    match when both the prefix and suffix occur in `name`; otherwise plain
    substring match.
    """
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False


def recursively_load_weights(fairseq_dict, hf_model, task):
    """Load every tensor of a fairseq state dict into `hf_model` for `task`."""
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(F'''Unsupported task: {task}''')

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(F'''{name} was ignored''')
            continue

        is_used = False
        if "conv_layers" in name:
            # Conv feature-extractor weights get special layer-by-layer handling.
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        # The layer index sits just before the matched suffix.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(F'''Unused weights: {unused_weights}''')


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one conv / norm tensor of the feature extractor, by parsed layer id.

    Raises:
        ValueError: on a shape mismatch against the target module.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'''
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_speechta_checkpoint(
    task,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    vocab_path=None,
    repo_id=None,
):
    """Build the HF model for `task`, load fairseq weights, and save (optionally push)."""
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path)
    else:
        config = SpeechTaConfig()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config)
    elif task == "t2s":
        # NOTE(review): the attribute names for 1876/600 were lost in the
        # source mangling; these targets follow the upstream conversion
        # script -- confirm against it.
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config)
    else:
        raise ValueError(F'''Unknown task name: {task}''')

    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        # NOTE(review): lstrip/rstrip flags were lost in the source mangling;
        # True/False follows the upstream script -- confirm.
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechTaFeatureExtractor()
    processor = SpeechTaProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--task",
        default="s2t",
        type=str,
        help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    args = parser.parse_args()
    convert_speechta_checkpoint(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
656
0
import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class _UpperCamelCase( __lowerCamelCase ): def __init__( self : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any=1_3 , SCREAMING_SNAKE_CASE__ : Optional[Any]=7 , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : List[str]=2 , SCREAMING_SNAKE_CASE__ : List[str]=9_9 , SCREAMING_SNAKE_CASE__ : Any=0 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3_2 , SCREAMING_SNAKE_CASE__ : Any=5 , SCREAMING_SNAKE_CASE__ : Any=4 , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : List[Any]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=5_1_2 , SCREAMING_SNAKE_CASE__ : Tuple=1_2 , SCREAMING_SNAKE_CASE__ : Dict=2 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.02 , SCREAMING_SNAKE_CASE__ : Optional[int]=3 , SCREAMING_SNAKE_CASE__ : Any=4 , SCREAMING_SNAKE_CASE__ : List[Any]="last" , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : str=None , ): '''simple docstring''' __a : Dict = parent __a : Dict = batch_size __a 
: Union[str, Any] = seq_length __a : Union[str, Any] = is_training __a : List[Any] = use_input_lengths __a : Any = use_token_type_ids __a : Any = use_labels __a : Optional[Any] = gelu_activation __a : List[Any] = sinusoidal_embeddings __a : int = causal __a : Tuple = asm __a : List[str] = n_langs __a : Tuple = vocab_size __a : Dict = n_special __a : int = hidden_size __a : Dict = num_hidden_layers __a : Dict = num_attention_heads __a : str = hidden_dropout_prob __a : Optional[Any] = attention_probs_dropout_prob __a : List[str] = max_position_embeddings __a : Tuple = type_vocab_size __a : str = type_sequence_label_size __a : Dict = initializer_range __a : int = num_labels __a : Optional[int] = num_choices __a : List[Any] = summary_type __a : Any = use_proj __a : Any = scope def __lowerCAmelCase ( self : Any ): '''simple docstring''' __a : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a : Dict = random_attention_mask([self.batch_size, self.seq_length] ) __a : List[Any] = None if self.use_input_lengths: __a : List[Any] = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length __a : str = None if self.use_token_type_ids: __a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) __a : int = None __a : str = None __a : Dict = None if self.use_labels: __a : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __a : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __a : List[Any] = ids_tensor([self.batch_size] , 2 ).float() __a : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) __a : Dict = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def __lowerCAmelCase ( self : Tuple ): '''simple docstring''' return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size 
, n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def __lowerCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : str , ): '''simple docstring''' __a : Tuple = FlaubertModel(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() __a : List[Any] = model(SCREAMING_SNAKE_CASE__ , lengths=SCREAMING_SNAKE_CASE__ , langs=SCREAMING_SNAKE_CASE__ ) __a : int = model(SCREAMING_SNAKE_CASE__ , langs=SCREAMING_SNAKE_CASE__ ) __a : int = model(SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] , ): '''simple docstring''' __a : Any = FlaubertWithLMHeadModel(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() __a : Optional[int] = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.loss.shape , () ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , ): '''simple docstring''' __a : Union[str, Any] = FlaubertForQuestionAnsweringSimple(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() __a : int = model(SCREAMING_SNAKE_CASE__ ) __a : Tuple = model(SCREAMING_SNAKE_CASE__ , start_positions=SCREAMING_SNAKE_CASE__ , end_positions=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , ): '''simple docstring''' __a : Tuple = FlaubertForQuestionAnswering(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() __a : List[Any] = model(SCREAMING_SNAKE_CASE__ ) __a : List[Any] = model( SCREAMING_SNAKE_CASE__ , start_positions=SCREAMING_SNAKE_CASE__ , end_positions=SCREAMING_SNAKE_CASE__ , cls_index=SCREAMING_SNAKE_CASE__ , is_impossible=SCREAMING_SNAKE_CASE__ , p_mask=SCREAMING_SNAKE_CASE__ , ) __a : List[Any] = model( SCREAMING_SNAKE_CASE__ , start_positions=SCREAMING_SNAKE_CASE__ , end_positions=SCREAMING_SNAKE_CASE__ , cls_index=SCREAMING_SNAKE_CASE__ , is_impossible=SCREAMING_SNAKE_CASE__ , ) ((__a) , ) : Optional[Any] = result_with_labels.to_tuple() __a : List[Any] = 
model(SCREAMING_SNAKE_CASE__ , start_positions=SCREAMING_SNAKE_CASE__ , end_positions=SCREAMING_SNAKE_CASE__ ) ((__a) , ) : List[Any] = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , ): '''simple docstring''' __a : Optional[Any] = FlaubertForSequenceClassification(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() __a : List[str] = model(SCREAMING_SNAKE_CASE__ ) __a : int = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __lowerCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str , ): '''simple docstring''' __a : List[Any] = self.num_labels __a : str = FlaubertForTokenClassification(SCREAMING_SNAKE_CASE__ 
) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() __a : Any = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] , ): '''simple docstring''' __a : Any = self.num_choices __a : List[str] = FlaubertForMultipleChoice(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() __a : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __a : Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __a : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __a : List[Any] = model( SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCAmelCase ( self : List[Any] ): '''simple docstring''' __a : Dict = self.prepare_config_and_inputs() ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) : str = config_and_inputs __a : List[str] = { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths, 'attention_mask': input_mask, } return config, inputs_dict @require_torch class _UpperCamelCase( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : str = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, 
FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) __SCREAMING_SNAKE_CASE : str = ( { '''feature-extraction''': FlaubertModel, '''fill-mask''': FlaubertWithLMHeadModel, '''question-answering''': FlaubertForQuestionAnsweringSimple, '''text-classification''': FlaubertForSequenceClassification, '''token-classification''': FlaubertForTokenClassification, '''zero-shot''': FlaubertForSequenceClassification, } if is_torch_available() else {} ) def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('Fast' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def __lowerCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple=False ): '''simple docstring''' __a : List[Any] = super()._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": __a : Tuple = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ ) __a : Tuple = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ ) return inputs_dict def __lowerCAmelCase ( self : Tuple ): '''simple docstring''' __a : Union[str, Any] = FlaubertModelTester(self ) __a : Dict = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , emb_dim=3_7 ) def 
__lowerCAmelCase ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def __lowerCAmelCase ( self : Dict ): '''simple docstring''' __a : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : int ): '''simple docstring''' __a : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : str ): '''simple docstring''' __a : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' __a : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' __a : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : List[Any] ): '''simple docstring''' __a : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' __a : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*SCREAMING_SNAKE_CASE__ ) @slow def __lowerCAmelCase ( self : Tuple ): '''simple docstring''' for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a : Dict = FlaubertModel.from_pretrained(SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ ) @slow @require_torch_gpu def __lowerCAmelCase ( self : int ): '''simple docstring''' __a , __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for 
model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. if model_class == FlaubertForMultipleChoice: return __a : str = True __a : Tuple = model_class(config=SCREAMING_SNAKE_CASE__ ) __a : Any = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __a : Union[str, Any] = torch.jit.trace( SCREAMING_SNAKE_CASE__ , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , 'traced_model.pt' ) ) __a : Tuple = torch.jit.load(os.path.join(SCREAMING_SNAKE_CASE__ , 'traced_model.pt' ) , map_location=SCREAMING_SNAKE_CASE__ ) loaded(inputs_dict['input_ids'].to(SCREAMING_SNAKE_CASE__ ) , inputs_dict['attention_mask'].to(SCREAMING_SNAKE_CASE__ ) ) @require_torch class _UpperCamelCase( unittest.TestCase ): @slow def __lowerCAmelCase ( self : Tuple ): '''simple docstring''' __a : List[str] = FlaubertModel.from_pretrained('flaubert/flaubert_base_cased' ) __a : Union[str, Any] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) with torch.no_grad(): __a : str = model(SCREAMING_SNAKE_CASE__ )[0] __a : Optional[Any] = torch.Size((1, 1_1, 7_6_8) ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ ) __a : Any = torch.tensor( [[[-2.6_251, -1.4_298, -0.0_227], [-2.8_510, -1.6_387, 0.2_258], [-2.8_114, -1.1_832, -0.3_066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
47
"""simple docstring""" from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase): '''simple docstring''' __magic_name__ : List[Any] = [R"""h\.\d+\.attn\.bias""", R"""h\.\d+\.attn\.masked_bias"""] @register_to_config def __init__( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : int = 50257 , UpperCamelCase__ : int = 1024 , UpperCamelCase__ : int = 768 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : str = "gelu_new" , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : float = 1E-5 , UpperCamelCase__ : float = 0.02 , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , ): super().__init__() A__ : Dict =prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( F'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and''' F''' `n_embd`: {n_embd} are not equal.''' ) A__ : Optional[int] =prefix_inner_dim A__ : Optional[int] =prefix_hidden_dim A__ : Optional[int] =( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim ) if self.prefix_hidden_dim is not None else nn.Identity() ) A__ : Optional[int] =( nn.Linear(self.prefix_hidden_dim , UpperCamelCase__ ) if self.prefix_hidden_dim is not None else nn.Identity() ) A__ : str =GPTaConfig( vocab_size=UpperCamelCase__ , n_positions=UpperCamelCase__ , n_embd=UpperCamelCase__ , n_layer=UpperCamelCase__ , n_head=UpperCamelCase__ , n_inner=UpperCamelCase__ 
, activation_function=UpperCamelCase__ , resid_pdrop=UpperCamelCase__ , embd_pdrop=UpperCamelCase__ , attn_pdrop=UpperCamelCase__ , layer_norm_epsilon=UpperCamelCase__ , initializer_range=UpperCamelCase__ , scale_attn_weights=UpperCamelCase__ , use_cache=UpperCamelCase__ , scale_attn_by_inverse_layer_idx=UpperCamelCase__ , reorder_and_upcast_attn=UpperCamelCase__ , ) A__ : Any =GPTaLMHeadModel(UpperCamelCase__ ) def _UpperCAmelCase ( self : Any , UpperCamelCase__ : torch.Tensor , UpperCamelCase__ : torch.Tensor , UpperCamelCase__ : Optional[torch.Tensor] = None , UpperCamelCase__ : Optional[torch.Tensor] = None , ): A__ : int =self.transformer.transformer.wte(UpperCamelCase__ ) A__ : Tuple =self.encode_prefix(UpperCamelCase__ ) A__ : Union[str, Any] =self.decode_prefix(UpperCamelCase__ ) A__ : Tuple =torch.cat((prefix_embeds, embedding_text) , dim=1 ) if labels is not None: A__ : Any =self.get_dummy_token(input_ids.shape[0] , input_ids.device ) A__ : List[Any] =torch.cat((dummy_token, input_ids) , dim=1 ) A__ : Any =self.transformer(inputs_embeds=UpperCamelCase__ , labels=UpperCamelCase__ , attention_mask=UpperCamelCase__ ) if self.prefix_hidden_dim is not None: return out, hidden else: return out def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : torch.device ): return torch.zeros(UpperCamelCase__ , self.prefix_length , dtype=torch.intaa , device=UpperCamelCase__ ) def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : Tuple ): return self.encode_prefix(UpperCamelCase__ ) @torch.no_grad() def _UpperCAmelCase ( self : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : str ): A__ : Optional[int] =torch.split(UpperCamelCase__ , 1 , dim=0 ) A__ : List[str] =[] A__ : Dict =[] for feature in features: A__ : Any =self.decode_prefix(feature.to(UpperCamelCase__ ) ) # back to the clip feature # Only support beam search for now A__ , A__ : Optional[Any] =self.generate_beam( 
input_embeds=UpperCamelCase__ , device=UpperCamelCase__ , eos_token_id=UpperCamelCase__ ) generated_tokens.append(output_tokens[0] ) generated_seq_lengths.append(seq_lengths[0] ) A__ : Optional[Any] =torch.stack(UpperCamelCase__ ) A__ : Optional[int] =torch.stack(UpperCamelCase__ ) return generated_tokens, generated_seq_lengths @torch.no_grad() def _UpperCAmelCase ( self : List[Any] , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : int = 5 , UpperCamelCase__ : int = 67 , UpperCamelCase__ : float = 1.0 , UpperCamelCase__ : Optional[int] = None , ): A__ : str =eos_token_id A__ : Optional[Any] =None A__ : int =None A__ : Union[str, Any] =torch.ones(UpperCamelCase__ , device=UpperCamelCase__ , dtype=torch.int ) A__ : Any =torch.zeros(UpperCamelCase__ , device=UpperCamelCase__ , dtype=torch.bool ) if input_embeds is not None: A__ : Union[str, Any] =input_embeds else: A__ : Optional[Any] =self.transformer.transformer.wte(UpperCamelCase__ ) for i in range(UpperCamelCase__ ): A__ : Optional[int] =self.transformer(inputs_embeds=UpperCamelCase__ ) A__ : Tuple =outputs.logits A__ : Union[str, Any] =logits[:, -1, :] / (temperature if temperature > 0 else 1.0) A__ : Optional[Any] =logits.softmax(-1 ).log() if scores is None: A__ , A__ : Union[str, Any] =logits.topk(UpperCamelCase__ , -1 ) A__ : Union[str, Any] =generated.expand(UpperCamelCase__ , *generated.shape[1:] ) A__ , A__ : Optional[int] =next_tokens.permute(1 , 0 ), scores.squeeze(0 ) if tokens is None: A__ : str =next_tokens else: A__ : Optional[Any] =tokens.expand(UpperCamelCase__ , *tokens.shape[1:] ) A__ : str =torch.cat((tokens, next_tokens) , dim=1 ) else: A__ : Union[str, Any] =-float(np.inf ) A__ : Dict =0 A__ : Optional[Any] =scores[:, None] + logits seq_lengths[~is_stopped] += 1 A__ : Optional[Any] =scores_sum / seq_lengths[:, None] A__ , A__ : List[Any] =scores_sum_average.view(-1 ).topk(UpperCamelCase__ , -1 ) A__ : Tuple 
=next_tokens // scores_sum.shape[1] A__ : List[Any] =seq_lengths[next_tokens_source] A__ : int =next_tokens % scores_sum.shape[1] A__ : str =next_tokens.unsqueeze(1 ) A__ : List[Any] =tokens[next_tokens_source] A__ : int =torch.cat((tokens, next_tokens) , dim=1 ) A__ : List[str] =generated[next_tokens_source] A__ : Optional[Any] =scores_sum_average * seq_lengths A__ : Optional[int] =is_stopped[next_tokens_source] A__ : List[str] =self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 ) A__ : str =torch.cat((generated, next_token_embed) , dim=1 ) A__ : str =is_stopped + next_tokens.eq(UpperCamelCase__ ).squeeze() if is_stopped.all(): break A__ : Optional[int] =scores / seq_lengths A__ : List[Any] =scores.argsort(descending=UpperCamelCase__ ) # tokens tensors are already padded to max_seq_length A__ : int =[tokens[i] for i in order] A__ : Any =torch.stack(UpperCamelCase__ , dim=0 ) A__ : int =torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype ) return output_texts, seq_lengths
656
0
'''simple docstring''' import collections import json import os import re from typing import TYPE_CHECKING, List, Optional, Tuple import numpy as np from ...tokenization_utils_fast import PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation UpperCAmelCase__ : Dict = logging.get_logger(__name__) UpperCAmelCase__ : List[str] = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"} UpperCAmelCase__ : Any = { "vocab_file": { "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt", }, "emoji_file": { "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json", }, } UpperCAmelCase__ : int = { "abeja/gpt-neox-japanese-2.7b": 20_48, } def A ( UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any] ) -> List[Any]: '''simple docstring''' with open(UpperCamelCase_ , "r" , encoding="utf-8" ) as f: lowerCAmelCase__ = json.loads(f.read() ) lowerCAmelCase__ = collections.OrderedDict() lowerCAmelCase__ = collections.OrderedDict() lowerCAmelCase__ = collections.OrderedDict() with open(UpperCamelCase_ , "r" , encoding="utf-8" ) as f: lowerCAmelCase__ = f.readlines() lowerCAmelCase__ = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token] for idx, b in enumerate(UpperCamelCase_ ): lowerCAmelCase__ = b lowerCAmelCase__ = idx for wd in b: lowerCAmelCase__ = idx return vocab, raw_vocab, ids_to_tokens, emoji class A ( SCREAMING_SNAKE_CASE__ ): snake_case__ :List[str] = VOCAB_FILES_NAMES snake_case__ :Dict = PRETRAINED_VOCAB_FILES_MAP snake_case__ :Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ :int = ['input_ids', 'attention_mask'] def __init__( self : Dict , __magic_name__ : Dict , __magic_name__ : int , __magic_name__ : Dict="<|endoftext|>" , __magic_name__ : List[str]="<|endoftext|>" , __magic_name__ : Tuple="<|startoftext|>" , __magic_name__ : 
Optional[int]="<|endoftext|>" , __magic_name__ : List[Any]=False , **__magic_name__ : Dict , ): """simple docstring""" super().__init__( unk_token=__magic_name__ , pad_token=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , do_clean_text=__magic_name__ , **__magic_name__ , ) if not os.path.isfile(__magic_name__ ): raise ValueError( f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained""" " model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) if not os.path.isfile(__magic_name__ ): raise ValueError( f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google""" " pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) lowerCAmelCase__ = do_clean_text lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = load_vocab_and_emoji(__magic_name__ , __magic_name__ ) lowerCAmelCase__ = SubWordJapaneseTokenizer( vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji ) @property def __SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" return len(self.raw_vocab ) def __SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" return dict(self.raw_vocab , **self.added_tokens_encoder ) def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : int ): """simple docstring""" return self.subword_tokenizer.tokenize(__magic_name__ , clean=self.do_clean_text ) def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : Any ): """simple docstring""" return self.vocab.get(__magic_name__ , self.vocab.get(self.unk_token ) ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Optional[int] ): """simple docstring""" return self.subword_tokenizer.convert_id_to_token(__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : str ): """simple docstring""" lowerCAmelCase__ = "".join(__magic_name__ ).strip() return 
out_string def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : "Conversation" ): """simple docstring""" lowerCAmelCase__ = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(__magic_name__ , add_special_tokens=__magic_name__ ) + [self.eos_token_id] ) if len(__magic_name__ ) > self.model_max_length: lowerCAmelCase__ = input_ids[-self.model_max_length :] return input_ids def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[str] = None ): """simple docstring""" lowerCAmelCase__ = 0 if os.path.isdir(__magic_name__ ): lowerCAmelCase__ = os.path.join( __magic_name__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) lowerCAmelCase__ = os.path.join( __magic_name__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] ) else: lowerCAmelCase__ = ( (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"] ) lowerCAmelCase__ = ( (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"] ) with open(__magic_name__ , "w" , encoding="utf-8" ) as writer: for token_index, token in self.ids_to_tokens.items(): if index != token_index: logger.warning( f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.""" " Please check that the vocabulary is not corrupted!" 
) lowerCAmelCase__ = token_index writer.write(",".join(__magic_name__ ) + "\n" ) index += 1 with open(__magic_name__ , "w" , encoding="utf-8" ) as writer: json.dump(self.emoji , __magic_name__ ) return vocab_file, emoji_file class A ( SCREAMING_SNAKE_CASE__ ): def __init__( self : List[str] , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : Optional[Any] ): """simple docstring""" lowerCAmelCase__ = vocab # same as swe lowerCAmelCase__ = ids_to_tokens # same as bpe lowerCAmelCase__ = emoji lowerCAmelCase__ = np.max([len(__magic_name__ ) for w in self.vocab.keys()] ) lowerCAmelCase__ = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)" ) lowerCAmelCase__ = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*" ) lowerCAmelCase__ = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}" ) lowerCAmelCase__ = re.compile( r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" ) lowerCAmelCase__ = re.compile( r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" ) lowerCAmelCase__ = re.compile( r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" ) lowerCAmelCase__ = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿" lowerCAmelCase__ = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟" lowerCAmelCase__ = str.maketrans({k: "<BLOCK>" for k in keisen + blocks} ) def __len__( self : Dict ): """simple docstring""" return len(self.ids_to_tokens ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Union[str, Any] ): """simple docstring""" 
lowerCAmelCase__ = self.content_repattera.sub("<URL>" , __magic_name__ ) lowerCAmelCase__ = self.content_repattera.sub("<EMAIL>" , __magic_name__ ) lowerCAmelCase__ = self.content_repattera.sub("<TEL>" , __magic_name__ ) lowerCAmelCase__ = self.content_repattera.sub("<DATE>" , __magic_name__ ) lowerCAmelCase__ = self.content_repattera.sub("<DATE>" , __magic_name__ ) lowerCAmelCase__ = self.content_repattera.sub("<PRICE>" , __magic_name__ ) lowerCAmelCase__ = content.translate(self.content_transa ) while "<BLOCK><BLOCK>" in content: lowerCAmelCase__ = content.replace("<BLOCK><BLOCK>" , "<BLOCK>" ) return content def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : Dict , __magic_name__ : Optional[int]=False ): """simple docstring""" lowerCAmelCase__ = text.replace(" " , "<SP>" ) lowerCAmelCase__ = text.replace(" " , "<SP>" ) lowerCAmelCase__ = text.replace("\r\n" , "<BR>" ) lowerCAmelCase__ = text.replace("\n" , "<BR>" ) lowerCAmelCase__ = text.replace("\r" , "<BR>" ) lowerCAmelCase__ = text.replace("\t" , "<TAB>" ) lowerCAmelCase__ = text.replace("—" , "ー" ) lowerCAmelCase__ = text.replace("−" , "ー" ) for k, v in self.emoji["emoji"].items(): if k in text: lowerCAmelCase__ = text.replace(__magic_name__ , __magic_name__ ) if clean: lowerCAmelCase__ = self.clean_text(__magic_name__ ) def check_simbol(__magic_name__ : int ): lowerCAmelCase__ = x.encode() if len(__magic_name__ ) == 1 and len(__magic_name__ ) == 2: lowerCAmelCase__ = (int(e[0] ) << 8) + int(e[1] ) if ( (c >= 0XC2A1 and c <= 0XC2BF) or (c >= 0XC780 and c <= 0XC783) or (c >= 0XCAB9 and c <= 0XCBBF) or (c >= 0XCC80 and c <= 0XCDA2) ): return True return False def checkuae(__magic_name__ : int ): lowerCAmelCase__ = x.encode() if len(__magic_name__ ) == 1 and len(__magic_name__ ) == 3: lowerCAmelCase__ = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] ) if c >= 0XE2_8080 and c <= 0XE2_B07F: return True return False lowerCAmelCase__ = 0 lowerCAmelCase__ = [] while pos < len(__magic_name__ ): 
lowerCAmelCase__ = min(len(__magic_name__ ) , pos + self.maxlen + 1 ) if text[pos] == "<" else pos + 3 lowerCAmelCase__ = [] # (token_id, token, pos) for e in range(__magic_name__ , __magic_name__ , -1 ): lowerCAmelCase__ = text[pos:e] if wd in self.vocab: if wd[0] == "<" and len(__magic_name__ ) > 2: lowerCAmelCase__ = [(self.vocab[wd], wd, e)] break else: candidates.append((self.vocab[wd], wd, e) ) if len(__magic_name__ ) > 0: # the smallest token_id is adopted lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = sorted(__magic_name__ , key=lambda __magic_name__ : x[0] )[0] result.append(__magic_name__ ) lowerCAmelCase__ = e else: lowerCAmelCase__ = pos + 1 lowerCAmelCase__ = text[pos:end] if check_simbol(__magic_name__ ): result.append("<KIGOU>" ) elif checkuae(__magic_name__ ): result.append("<U2000U2BFF>" ) else: for i in wd.encode("utf-8" ): result.append("<|byte%d|>" % i ) lowerCAmelCase__ = end return result def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : str , __magic_name__ : str="\n" ): """simple docstring""" lowerCAmelCase__ = [] lowerCAmelCase__ = [] lowerCAmelCase__ = self.ids_to_tokens[index][0] if word[:6] == "<|byte" and word[-2:] == "|>": byte_tokens.append(int(word[6:-2] ) ) else: if len(__magic_name__ ) > 0: words.append(bytearray(__magic_name__ ).decode("utf-8" , errors="replace" ) ) lowerCAmelCase__ = [] if word[:7] == "<|emoji" and word[-2:] == "|>": words.append(self.emoji["emoji_inv"][word] ) elif word == "<SP>": words.append(" " ) elif word == "<BR>": words.append(__magic_name__ ) elif word == "<TAB>": words.append("\t" ) elif word == "<BLOCK>": words.append("▀" ) elif word == "<KIGOU>": words.append("ǀ" ) elif word == "<U2000U2BFF>": words.append("‖" ) else: words.append(__magic_name__ ) if len(__magic_name__ ) > 0: words.append(bytearray(__magic_name__ ).decode("utf-8" , errors="replace" ) ) lowerCAmelCase__ = "".join(__magic_name__ ) return text
48
"""simple docstring""" import os def lowercase ( ): """simple docstring""" A__ : List[Any] =os.path.dirname(os.path.realpath(UpperCamelCase ) ) A__ : str =os.path.join(UpperCamelCase , "triangle.txt" ) with open(UpperCamelCase ) as f: A__ : Optional[int] =f.readlines() A__ : str =[] for line in triangle: A__ : Union[str, Any] =[] for number in line.strip().split(" " ): numbers_from_line.append(int(UpperCamelCase ) ) a.append(UpperCamelCase ) for i in range(1 , len(UpperCamelCase ) ): for j in range(len(a[i] ) ): A__ : Union[str, Any] =a[i - 1][j] if j != len(a[i - 1] ) else 0 A__ : Union[str, Any] =a[i - 1][j - 1] if j > 0 else 0 a[i][j] += max(UpperCamelCase , UpperCamelCase ) return max(a[-1] ) if __name__ == "__main__": print(solution())
656
0
"""simple docstring""" from __future__ import annotations def lowercase__ ( snake_case_ :list[int] , snake_case_ :int ): if len(snake_case_ ) == 0: return False __UpperCAmelCase = len(snake_case_ ) // 2 if a_list[midpoint] == item: return True if item < a_list[midpoint]: return binary_search(a_list[:midpoint] , snake_case_ ) else: return binary_search(a_list[midpoint + 1 :] , snake_case_ ) if __name__ == "__main__": _lowercase : str = input('Enter numbers separated by comma:\n').strip() _lowercase : int = [int(item.strip()) for item in user_input.split(',')] _lowercase : str = int(input('Enter the number to be found in the list:\n').strip()) _lowercase : Union[str, Any] = '' if binary_search(sequence, target) else 'not ' print(f"""{target} was {not_str}found in {sequence}""")
49
"""simple docstring""" import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() __A : int = logging.get_logger(__name__) def lowercase ( UpperCamelCase : Any ): """simple docstring""" A__ : str =OrderedDict() for key, value in state_dict.items(): if key.startswith("module.encoder" ): A__ : Dict =key.replace("module.encoder" , "glpn.encoder" ) if key.startswith("module.decoder" ): A__ : Optional[int] =key.replace("module.decoder" , "decoder.stages" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 A__ : Tuple =key[key.find("patch_embed" ) + len("patch_embed" )] A__ : Optional[Any] =key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(UpperCamelCase )-1}''' ) if "norm" in key: A__ : Dict =key.replace("norm" , "layer_norm" ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 A__ : Any =key[key.find("glpn.encoder.layer_norm" ) + len("glpn.encoder.layer_norm" )] A__ : Tuple =key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(UpperCamelCase )-1}''' ) if "layer_norm1" in key: A__ : List[Any] =key.replace("layer_norm1" , "layer_norm_1" ) if "layer_norm2" in key: A__ : Optional[int] =key.replace("layer_norm2" , "layer_norm_2" ) if "block" in key: # replace for example block1 by block.0 A__ : int =key[key.find("block" ) + len("block" )] A__ : Optional[Any] =key.replace(F'''block{idx}''' , F'''block.{int(UpperCamelCase )-1}''' ) if "attn.q" in key: A__ : Optional[Any] =key.replace("attn.q" , "attention.self.query" ) if "attn.proj" in key: A__ : Union[str, Any] =key.replace("attn.proj" , "attention.output.dense" ) if "attn" in key: A__ : str =key.replace("attn" , "attention.self" ) if "fc1" in key: A__ : Dict =key.replace("fc1" , "dense1" ) if "fc2" in key: A__ : str 
=key.replace("fc2" , "dense2" ) if "linear_pred" in key: A__ : List[Any] =key.replace("linear_pred" , "classifier" ) if "linear_fuse" in key: A__ : List[str] =key.replace("linear_fuse.conv" , "linear_fuse" ) A__ : Any =key.replace("linear_fuse.bn" , "batch_norm" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 A__ : str =key[key.find("linear_c" ) + len("linear_c" )] A__ : Dict =key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(UpperCamelCase )-1}''' ) if "bot_conv" in key: A__ : Union[str, Any] =key.replace("bot_conv" , "0.convolution" ) if "skip_conv1" in key: A__ : List[Any] =key.replace("skip_conv1" , "1.convolution" ) if "skip_conv2" in key: A__ : int =key.replace("skip_conv2" , "2.convolution" ) if "fusion1" in key: A__ : Optional[Any] =key.replace("fusion1" , "1.fusion" ) if "fusion2" in key: A__ : Optional[Any] =key.replace("fusion2" , "2.fusion" ) if "fusion3" in key: A__ : int =key.replace("fusion3" , "3.fusion" ) if "fusion" in key and "conv" in key: A__ : List[str] =key.replace("conv" , "convolutional_layer" ) if key.startswith("module.last_layer_depth" ): A__ : Tuple =key.replace("module.last_layer_depth" , "head.head" ) A__ : int =value return new_state_dict def lowercase ( UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict ): """simple docstring""" # for each of the encoder blocks: for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) A__ : int =state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' ) A__ : str =state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' ) # next, add keys and values (in that order) to the state dict A__ : List[str] =kv_weight[ : config.hidden_sizes[i], : ] A__ : Dict =kv_bias[: config.hidden_sizes[i]] A__ : Any =kv_weight[ config.hidden_sizes[i] :, : ] A__ : Any =kv_bias[config.hidden_sizes[i] :] def lowercase ( ): """simple 
docstring""" A__ : Optional[Any] ="http://images.cocodataset.org/val2017/000000039769.jpg" A__ : List[Any] =Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw ) return image @torch.no_grad() def lowercase ( UpperCamelCase : str , UpperCamelCase : Tuple , UpperCamelCase : List[str]=False , UpperCamelCase : str=None ): """simple docstring""" A__ : List[str] =GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] ) # load image processor (only resize + rescale) A__ : str =GLPNImageProcessor() # prepare image A__ : Any =prepare_img() A__ : Optional[int] =image_processor(images=UpperCamelCase , return_tensors="pt" ).pixel_values logger.info("Converting model..." ) # load original state dict A__ : int =torch.load(UpperCamelCase , map_location=torch.device("cpu" ) ) # rename keys A__ : Union[str, Any] =rename_keys(UpperCamelCase ) # key and value matrices need special treatment read_in_k_v(UpperCamelCase , UpperCamelCase ) # create HuggingFace model and load state dict A__ : Optional[int] =GLPNForDepthEstimation(UpperCamelCase ) model.load_state_dict(UpperCamelCase ) model.eval() # forward pass A__ : int =model(UpperCamelCase ) A__ : Optional[Any] =outputs.predicted_depth # verify output if model_name is not None: if "nyu" in model_name: A__ : List[Any] =torch.tensor( [[4.41_47, 4.08_73, 4.06_73], [3.78_90, 3.28_81, 3.15_25], [3.76_74, 3.54_23, 3.49_13]] ) elif "kitti" in model_name: A__ : Tuple =torch.tensor( [[3.42_91, 2.78_65, 2.51_51], [3.28_41, 2.70_21, 2.35_02], [3.11_47, 2.46_25, 2.24_81]] ) else: raise ValueError(F'''Unknown model name: {model_name}''' ) A__ : str =torch.Size([1, 480, 640] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3] , UpperCamelCase , atol=1E-4 ) print("Looks ok!" ) # finally, push to hub if required if push_to_hub: logger.info("Pushing model and image processor to the hub..." 
) model.push_to_hub( repo_path_or_name=Path(UpperCamelCase , UpperCamelCase ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=UpperCamelCase , ) image_processor.push_to_hub( repo_path_or_name=Path(UpperCamelCase , UpperCamelCase ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=UpperCamelCase , ) if __name__ == "__main__": __A : List[str] = argparse.ArgumentParser() parser.add_argument( "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub." ) parser.add_argument( "--model_name", default="glpn-kitti", type=str, help="Name of the model in case you're pushing to the hub.", ) __A : Any = parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
656
0
'''simple docstring'''


def method_a(boundary, steps):
    """Approximate the integral of ``f`` over [boundary[0], boundary[1]].

    Extended (composite) trapezoidal rule:
        int(f) ~= h * (f(a)/2 + f(x_1) + ... + f(x_{n-1}) + f(b)/2)

    :param boundary: two-element sequence [a, b] with the integration bounds.
    :param steps: number of sub-intervals (may be a float, as in the caller).
    """
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # interior points carry full weight h
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    """Yield the interior sample points a+h, a+2h, ..., b-h.

    The comparison allows h/2 of slack so the final interior point b-h is
    not dropped by floating-point drift; the original `x < (b - h)` test
    skipped it, under-counting one trapezoid.
    """
    x = a + h
    while x <= (b - h) + h / 2.0:
        yield x
        x = x + h


def f(x):
    """Integrand: f(x) = x**2. Edit this to integrate a different function."""
    y = (x - 0) * (x - 0)
    return y


def main():
    """Integrate f over [0, 1] with 10 steps and print the estimate."""
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(f'''y = {y}''')


if __name__ == "__main__":
    main()
50
"""simple docstring""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging __A : Any = logging.get_logger(__name__) __A : Optional[Any] = { "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json", # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class __lowerCAmelCase ( _UpperCamelCase): '''simple docstring''' __magic_name__ : Union[str, Any] = """gpt_neo""" __magic_name__ : Union[str, Any] = ["""past_key_values"""] __magic_name__ : Dict = {"""num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""} def __init__( self : Dict , UpperCamelCase__ : List[Any]=50257 , UpperCamelCase__ : Optional[Any]=2048 , UpperCamelCase__ : Tuple=2048 , UpperCamelCase__ : int=24 , UpperCamelCase__ : Dict=[[["global", "local"], 12]] , UpperCamelCase__ : Optional[Any]=16 , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : str=256 , UpperCamelCase__ : List[str]="gelu_new" , UpperCamelCase__ : List[str]=0.0 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : str=0.0 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : List[str]=1E-5 , UpperCamelCase__ : Any=0.02 , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Optional[Any]=50256 , UpperCamelCase__ : List[str]=50256 , **UpperCamelCase__ : str , ): A__ : Optional[Any] =vocab_size A__ : Dict =max_position_embeddings A__ : List[str] =hidden_size A__ : List[Any] =num_layers A__ : Tuple =num_heads A__ : List[str] =intermediate_size A__ : Tuple =window_size A__ : Dict =activation_function A__ : str =resid_dropout A__ : Union[str, Any] =embed_dropout A__ : List[str] =attention_dropout A__ : Tuple =classifier_dropout A__ : int =layer_norm_epsilon A__ : int =initializer_range A__ : str =use_cache A__ : Tuple =bos_token_id A__ 
: int =eos_token_id A__ : int =attention_types A__ : Any =self.expand_attention_types_params(UpperCamelCase__ ) if len(self.attention_layers ) != self.num_layers: raise ValueError( "Configuration for convolutional module is incorrect. " "It is required that `len(config.attention_layers)` == `config.num_layers` " F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, ''' F'''`config.num_layers = {self.num_layers}`. ''' "`config.attention_layers` is prepared using `config.attention_types`. " "Please verify the value of `config.attention_types` argument." ) super().__init__(bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ ) @staticmethod def _UpperCAmelCase ( UpperCamelCase__ : List[str] ): A__ : Optional[Any] =[] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def lowercase ( UpperCamelCase : List[str] , UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] ): """simple docstring""" import torch A__ : List[str] =input.size() A__ : Dict =len(UpperCamelCase ) A__ : Optional[int] =shape[dimension] A__ : str =torch.arange(0 , UpperCamelCase , UpperCamelCase ) A__ : Optional[int] =torch.div(sizedim - size , UpperCamelCase , rounding_mode="floor" ) + 1 A__ : str =torch.arange(UpperCamelCase ) + low_indices[:min_length][:, None] A__ : Tuple =[slice(UpperCamelCase )] * rank A__ : int =indices A__ : Optional[int] =input[s] A__ : Union[str, Any] =list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(UpperCamelCase ) def lowercase ( UpperCamelCase : str , UpperCamelCase : Any ): """simple docstring""" import torch A__ : List[str] =torch.arange(1 , UpperCamelCase ) A__ : List[Any] =torch.remainder(UpperCamelCase , UpperCamelCase ) A__ : Optional[int] =remainders == 0 A__ : str =candidates[divisor_indices] A__ : int =torch.max(UpperCamelCase ) return largest_divisor, torch.div(UpperCamelCase , 
UpperCamelCase , rounding_mode="floor" ) class __lowerCAmelCase ( _UpperCamelCase): '''simple docstring''' @property def _UpperCAmelCase ( self : List[Any] ): A__ : Optional[int] =OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} ) if self.use_past: self.fill_with_past_key_values_(UpperCamelCase__ , direction="inputs" ) A__ : Optional[int] ={0: "batch", 1: "past_sequence + sequence"} else: A__ : Tuple ={0: "batch", 1: "sequence"} return common_inputs @property def _UpperCAmelCase ( self : List[str] ): return self._config.num_heads def _UpperCAmelCase ( self : int , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ): A__ : Union[str, Any] =super(UpperCamelCase__ , self ).generate_dummy_inputs( UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ ) # We need to order the input in the way they appears in the forward() A__ : List[Any] =OrderedDict({"input_ids": common_inputs["input_ids"]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch A__ , A__ : Union[str, Any] =common_inputs["input_ids"].shape # Not using the same length for past_key_values A__ : Union[str, Any] =seqlen + 2 A__ : List[Any] =( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) A__ : Optional[Any] =[ (torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) for _ in range(self.num_layers ) ] A__ : Optional[Any] =common_inputs["attention_mask"] if self.use_past: A__ : Any =ordered_inputs["attention_mask"].dtype A__ : Tuple =torch.cat( [ordered_inputs["attention_mask"], torch.ones(UpperCamelCase__ , UpperCamelCase__ , dtype=UpperCamelCase__ )] , dim=1 ) return ordered_inputs @property def _UpperCAmelCase ( self : List[str] ): return 13
656
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) a__ : List[Any] = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : str = ['XLNetTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Optional[Any] = ['XLNetTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Tuple = [ 'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'XLNetForMultipleChoice', 'XLNetForQuestionAnswering', 'XLNetForQuestionAnsweringSimple', 'XLNetForSequenceClassification', 'XLNetForTokenClassification', 'XLNetLMHeadModel', 'XLNetModel', 'XLNetPreTrainedModel', 'load_tf_weights_in_xlnet', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : List[str] = [ 'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFXLNetForMultipleChoice', 'TFXLNetForQuestionAnsweringSimple', 'TFXLNetForSequenceClassification', 'TFXLNetForTokenClassification', 'TFXLNetLMHeadModel', 'TFXLNetMainLayer', 'TFXLNetModel', 'TFXLNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet import XLNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet_fast import XLNetTokenizerFast try: if not is_torch_available(): raise 
OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlnet import ( XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, XLNetForMultipleChoice, XLNetForQuestionAnswering, XLNetForQuestionAnsweringSimple, XLNetForSequenceClassification, XLNetForTokenClassification, XLNetLMHeadModel, XLNetModel, XLNetPreTrainedModel, load_tf_weights_in_xlnet, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlnet import ( TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLNetForMultipleChoice, TFXLNetForQuestionAnsweringSimple, TFXLNetForSequenceClassification, TFXLNetForTokenClassification, TFXLNetLMHeadModel, TFXLNetMainLayer, TFXLNetModel, TFXLNetPreTrainedModel, ) else: import sys a__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
51
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A : Union[str, Any] = logging.get_logger(__name__) __A : Any = { # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class __lowerCAmelCase ( _UpperCamelCase): '''simple docstring''' __magic_name__ : Tuple = """megatron-bert""" def __init__( self : Tuple , UpperCamelCase__ : Dict=29056 , UpperCamelCase__ : int=1024 , UpperCamelCase__ : Optional[int]=24 , UpperCamelCase__ : Dict=16 , UpperCamelCase__ : int=4096 , UpperCamelCase__ : str="gelu" , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : int=512 , UpperCamelCase__ : str=2 , UpperCamelCase__ : Union[str, Any]=0.02 , UpperCamelCase__ : Any=1E-12 , UpperCamelCase__ : List[Any]=0 , UpperCamelCase__ : str="absolute" , UpperCamelCase__ : Dict=True , **UpperCamelCase__ : Tuple , ): super().__init__(pad_token_id=UpperCamelCase__ , **UpperCamelCase__ ) A__ : Optional[int] =vocab_size A__ : Optional[int] =hidden_size A__ : str =num_hidden_layers A__ : Any =num_attention_heads A__ : str =hidden_act A__ : Optional[int] =intermediate_size A__ : str =hidden_dropout_prob A__ : str =attention_probs_dropout_prob A__ : List[Any] =max_position_embeddings A__ : List[Any] =type_vocab_size A__ : Tuple =initializer_range A__ : Any =layer_norm_eps A__ : Any =position_embedding_type A__ : Union[str, Any] =use_cache
656
0
"""simple docstring""" import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class __lowercase ( unittest.TestCase ): '''simple docstring''' __lowerCAmelCase = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a : Union[str, Any] = hf_hub_download( repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' ) __a : Union[str, Any] = VideoClassificationPipeline(model=_UpperCAmelCase , image_processor=_UpperCAmelCase , top_k=2 ) __a : List[Any] = [ example_video_filepath, '''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''', ] return video_classifier, examples def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ): for example in examples: __a : Dict = video_classifier(_UpperCAmelCase ) self.assertEqual( _UpperCAmelCase , [ {'''score''': ANY(_UpperCAmelCase ), '''label''': ANY(_UpperCAmelCase )}, {'''score''': ANY(_UpperCAmelCase ), '''label''': ANY(_UpperCAmelCase )}, ] , ) @require_torch def _lowerCamelCase ( self ): __a : str = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification''' __a : List[str] = VideoMAEFeatureExtractor( size={'''shortest_edge''': 10} , crop_size={'''height''': 10, '''width''': 10} ) __a : Tuple = pipeline( '''video-classification''' , model=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , frame_sampling_rate=4 ) __a : List[str] = hf_hub_download(repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' ) __a : List[Any] = 
video_classifier(_UpperCAmelCase , top_k=2 ) self.assertEqual( nested_simplify(_UpperCAmelCase , decimals=4 ) , [{'''score''': 0.5_1_9_9, '''label''': '''LABEL_0'''}, {'''score''': 0.4_8_0_1, '''label''': '''LABEL_1'''}] , ) __a : List[Any] = video_classifier( [ video_file_path, video_file_path, ] , top_k=2 , ) self.assertEqual( nested_simplify(_UpperCAmelCase , decimals=4 ) , [ [{'''score''': 0.5_1_9_9, '''label''': '''LABEL_0'''}, {'''score''': 0.4_8_0_1, '''label''': '''LABEL_1'''}], [{'''score''': 0.5_1_9_9, '''label''': '''LABEL_0'''}, {'''score''': 0.4_8_0_1, '''label''': '''LABEL_1'''}], ] , ) @require_tf def _lowerCamelCase ( self ): pass
52
"""simple docstring""" from __future__ import annotations def lowercase ( UpperCamelCase : list[float] ): """simple docstring""" if len(UpperCamelCase ) < 2: raise ValueError("Monogons and Digons are not polygons in the Euclidean space" ) if any(i <= 0 for i in nums ): raise ValueError("All values must be greater than 0" ) A__ : Union[str, Any] =nums.copy() copy_nums.sort() return copy_nums[-1] < sum(copy_nums[:-1] ) if __name__ == "__main__": import doctest doctest.testmod()
656
0
import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.text import TextDatasetReader from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def a_ ( lowerCAmelCase_ : Optional[Any], lowerCAmelCase_ : Optional[Any] ): assert isinstance(lowerCAmelCase_, lowerCAmelCase_ ) assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory', [False, True] ) def a_ ( lowerCAmelCase_ : int, lowerCAmelCase_ : str, lowerCAmelCase_ : int ): __lowerCAmelCase = tmp_path / 'cache' __lowerCAmelCase = {'text': 'string'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __lowerCAmelCase = TextDatasetReader(lowerCAmelCase_, cache_dir=lowerCAmelCase_, keep_in_memory=lowerCAmelCase_ ).read() _check_text_dataset(lowerCAmelCase_, lowerCAmelCase_ ) @pytest.mark.parametrize( 'features', [ None, {'text': 'string'}, {'text': 'int32'}, {'text': 'float32'}, ], ) def a_ ( lowerCAmelCase_ : Any, lowerCAmelCase_ : Dict, lowerCAmelCase_ : Union[str, Any] ): __lowerCAmelCase = tmp_path / 'cache' __lowerCAmelCase = {'text': 'string'} __lowerCAmelCase = features.copy() if features else default_expected_features __lowerCAmelCase = ( Features({feature: Value(lowerCAmelCase_ ) for feature, dtype in features.items()} ) if features is not None else None ) __lowerCAmelCase = TextDatasetReader(lowerCAmelCase_, features=lowerCAmelCase_, cache_dir=lowerCAmelCase_ ).read() _check_text_dataset(lowerCAmelCase_, lowerCAmelCase_ ) @pytest.mark.parametrize('split', [None, NamedSplit('train' ), 'train', 'test'] ) def a_ ( lowerCAmelCase_ : List[Any], lowerCAmelCase_ : List[str], lowerCAmelCase_ : Any ): __lowerCAmelCase = tmp_path / 'cache' __lowerCAmelCase = {'text': 'string'} __lowerCAmelCase = 
TextDatasetReader(lowerCAmelCase_, cache_dir=lowerCAmelCase_, split=lowerCAmelCase_ ).read() _check_text_dataset(lowerCAmelCase_, lowerCAmelCase_ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('path_type', [str, list] ) def a_ ( lowerCAmelCase_ : Dict, lowerCAmelCase_ : Any, lowerCAmelCase_ : Dict ): if issubclass(lowerCAmelCase_, lowerCAmelCase_ ): __lowerCAmelCase = text_path elif issubclass(lowerCAmelCase_, lowerCAmelCase_ ): __lowerCAmelCase = [text_path] __lowerCAmelCase = tmp_path / 'cache' __lowerCAmelCase = {'text': 'string'} __lowerCAmelCase = TextDatasetReader(lowerCAmelCase_, cache_dir=lowerCAmelCase_ ).read() _check_text_dataset(lowerCAmelCase_, lowerCAmelCase_ ) def a_ ( lowerCAmelCase_ : str, lowerCAmelCase_ : int, lowerCAmelCase_ : Tuple=("train",) ): assert isinstance(lowerCAmelCase_, lowerCAmelCase_ ) for split in splits: __lowerCAmelCase = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory', [False, True] ) def a_ ( lowerCAmelCase_ : Tuple, lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : Dict ): __lowerCAmelCase = tmp_path / 'cache' __lowerCAmelCase = {'text': 'string'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __lowerCAmelCase = TextDatasetReader({'train': text_path}, cache_dir=lowerCAmelCase_, keep_in_memory=lowerCAmelCase_ ).read() _check_text_datasetdict(lowerCAmelCase_, lowerCAmelCase_ ) @pytest.mark.parametrize( 'features', [ None, {'text': 'string'}, {'text': 'int32'}, {'text': 'float32'}, ], ) def a_ ( lowerCAmelCase_ : Optional[int], lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : List[Any] ): __lowerCAmelCase = tmp_path / 'cache' # CSV file loses col_1 string dtype information: default now is "int64" instead of 
"string" __lowerCAmelCase = {'text': 'string'} __lowerCAmelCase = features.copy() if features else default_expected_features __lowerCAmelCase = ( Features({feature: Value(lowerCAmelCase_ ) for feature, dtype in features.items()} ) if features is not None else None ) __lowerCAmelCase = TextDatasetReader({'train': text_path}, features=lowerCAmelCase_, cache_dir=lowerCAmelCase_ ).read() _check_text_datasetdict(lowerCAmelCase_, lowerCAmelCase_ ) @pytest.mark.parametrize('split', [None, NamedSplit('train' ), 'train', 'test'] ) def a_ ( lowerCAmelCase_ : int, lowerCAmelCase_ : str, lowerCAmelCase_ : Optional[int] ): if split: __lowerCAmelCase = {split: text_path} else: __lowerCAmelCase = 'train' __lowerCAmelCase = {'train': text_path, 'test': text_path} __lowerCAmelCase = tmp_path / 'cache' __lowerCAmelCase = {'text': 'string'} __lowerCAmelCase = TextDatasetReader(lowerCAmelCase_, cache_dir=lowerCAmelCase_ ).read() _check_text_datasetdict(lowerCAmelCase_, lowerCAmelCase_, splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() )
53
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) __A : Optional[Any] = { "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Any = [ "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST", "MegaForCausalLM", "MegaForMaskedLM", "MegaForMultipleChoice", "MegaForQuestionAnswering", "MegaForSequenceClassification", "MegaForTokenClassification", "MegaModel", "MegaPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mega import ( MEGA_PRETRAINED_MODEL_ARCHIVE_LIST, MegaForCausalLM, MegaForMaskedLM, MegaForMultipleChoice, MegaForQuestionAnswering, MegaForSequenceClassification, MegaForTokenClassification, MegaModel, MegaPreTrainedModel, ) else: import sys __A : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
656
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# BUG FIX: logger and the archive map were both assigned to the same name, so
# the logger was lost; they are distinct module constants.
SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}


class A(PretrainedConfig):
    """Configuration for a Swin Transformer V2 model.

    BUG FIX: `model_type` and `attribute_map` previously shared one class-attribute
    name (the second assignment shadowed the first), the base class was an
    undefined name, the ``__init__`` parameters were all identically named
    (a SyntaxError), and no attribute was ever stored on ``self``.
    """

    model_type = "swinv2"

    # Map HF-standard attribute names onto the Swin-specific ones.
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
54
"""simple docstring""" def lowercase ( UpperCamelCase : int ): """simple docstring""" if num <= 0: raise ValueError("Input must be a positive integer" ) A__ : Union[str, Any] =[True] * (num + 1) A__ : Union[str, Any] =2 while p * p <= num: if primes[p]: for i in range(p * p , num + 1 , UpperCamelCase ): A__ : str =False p += 1 return [prime for prime in range(2 , num + 1 ) if primes[prime]] if __name__ == "__main__": import doctest doctest.testmod() __A : Optional[int] = int(input("Enter a positive integer: ").strip()) print(prime_sieve_eratosthenes(user_num))
656
0
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Image
from .base import TaskTemplate


@dataclass(frozen=True)
class UpperCAmelCase(TaskTemplate):
    """Task template for image-classification datasets.

    BUG FIX: ``frozen`` and the base class were undefined names, all fields and
    both methods shared one identifier (shadowing each other), and
    ``align_with_features`` returned a name that was never bound.
    """

    # `task` is serialized even though it matches the default value.
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema uses the dataset's ClassLabel feature."""
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # Frozen dataclass: mutate through __dict__ to bypass the frozen guard.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map dataset column names to the canonical task column names."""
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
55
"""simple docstring""" import pickle import unittest import torch from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils import require_cpu @require_cpu class __lowerCAmelCase ( unittest.TestCase): '''simple docstring''' def _UpperCAmelCase ( self : List[Any] ): A__ : Tuple =torch.nn.Linear(10 , 10 ) A__ : List[str] =torch.optim.SGD(model.parameters() , 0.1 ) A__ : Union[str, Any] =Accelerator() A__ : str =accelerator.prepare(UpperCamelCase__ ) try: pickle.loads(pickle.dumps(UpperCamelCase__ ) ) except Exception as e: self.fail(F'''Accelerated optimizer pickling failed with {e}''' ) AcceleratorState._reset_state()
656
0
"""AltCLIP model configuration (text, vision, and combined configs)."""
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


# BUG FIX: the logger and the archive map previously shared one name, and all
# three config classes shared the name `_lowercase` while the code referenced
# `AltCLIPTextConfig` / `AltCLIPVisionConfig`; canonical names are restored.
logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}


class AltCLIPTextConfig(PretrainedConfig):
    """Configuration for the XLM-R style text tower of AltCLIP."""

    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250002,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim


class AltCLIPVisionConfig(PretrainedConfig):
    """Configuration for the CLIP-ViT style vision tower of AltCLIP."""

    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load a vision config, unwrapping it from a full AltCLIP config if needed."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AltCLIPConfig(PretrainedConfig):
    """Combined AltCLIP configuration holding a text and a vision sub-config."""

    model_type = "altclip"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in
        # most cases, but we don't want to break anything regarding `_config_dict` that existed before.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        """Build a combined config from already-instantiated sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding the nested sub-configs into plain dicts."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
56
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_big_bird import BigBirdTokenizer else: __A : Optional[int] = None __A : Union[str, Any] = logging.get_logger(__name__) __A : List[Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} __A : str = { "vocab_file": { "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model", "google/bigbird-roberta-large": ( "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model" ), "google/bigbird-base-trivia-itc": ( "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model" ), }, "tokenizer_file": { "google/bigbird-roberta-base": ( "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json" ), "google/bigbird-roberta-large": ( "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json" ), "google/bigbird-base-trivia-itc": ( "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json" ), }, } __A : List[str] = { "google/bigbird-roberta-base": 4_096, "google/bigbird-roberta-large": 4_096, "google/bigbird-base-trivia-itc": 4_096, } __A : Tuple = "▁" class __lowerCAmelCase ( _UpperCamelCase): '''simple docstring''' __magic_name__ : Dict = VOCAB_FILES_NAMES __magic_name__ : Any = PRETRAINED_VOCAB_FILES_MAP __magic_name__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ : List[Any] = BigBirdTokenizer __magic_name__ : Any = ["""input_ids""", """attention_mask"""] __magic_name__ : List[int] = [] def __init__( self : str , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Union[str, Any]="<unk>" , UpperCamelCase__ : 
str="<s>" , UpperCamelCase__ : int="</s>" , UpperCamelCase__ : Optional[int]="<pad>" , UpperCamelCase__ : Optional[Any]="[SEP]" , UpperCamelCase__ : List[Any]="[MASK]" , UpperCamelCase__ : str="[CLS]" , **UpperCamelCase__ : List[Any] , ): A__ : Optional[int] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else bos_token A__ : Optional[Any] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else eos_token A__ : Optional[int] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else unk_token A__ : int =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else pad_token A__ : str =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else cls_token A__ : List[Any] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it A__ : str =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token super().__init__( UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , **UpperCamelCase__ , ) A__ : List[Any] =vocab_file A__ : Optional[int] =False if not self.vocab_file else True def _UpperCAmelCase ( self : str , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ): A__ : Tuple =[self.sep_token_id] A__ : str =[self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." 
) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase__ )) + [1] return [1] + ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) + [1] def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ): A__ : Tuple =[self.sep_token_id] A__ : Dict =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _UpperCAmelCase ( self : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(UpperCamelCase__ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return A__ : List[str] =os.path.join( UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ): copyfile(self.vocab_file , UpperCamelCase__ ) return (out_vocab_file,)
656
0
def snake_case(p: int) -> bool:
    """Lucas-Lehmer primality test for the Mersenne number ``2**p - 1``.

    Only meaningful when ``p`` itself is prime (the classical precondition of
    the Lucas-Lehmer test).

    Returns:
        True if ``2**p - 1`` is prime.

    Raises:
        ValueError: if ``p < 2``.
    """
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        # M2 = 3 is prime; the s-sequence recurrence needs p > 2.
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    # BUG FIX: the script previously called the undefined name
    # `lucas_lehmer_test`; the test function here is `snake_case`.
    print(snake_case(7))
    print(snake_case(11))
57
"""simple docstring""" import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging __A : Optional[int] = logging.get_logger(__name__) __A : Optional[int] = {"vocab_file": "spiece.model"} __A : List[Any] = { "vocab_file": { "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model", } } class __lowerCAmelCase ( _UpperCamelCase): '''simple docstring''' def __init__( self : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any]=False , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Dict="<s>" , UpperCamelCase__ : str="</s>" , UpperCamelCase__ : Union[str, Any]="<unk>" , UpperCamelCase__ : Optional[int]="<sep>" , UpperCamelCase__ : Optional[int]="<pad>" , UpperCamelCase__ : Optional[int]="<cls>" , UpperCamelCase__ : List[str]="<mask>" , UpperCamelCase__ : Optional[Any]=["<eop>", "<eod>"] , UpperCamelCase__ : Optional[Dict[str, Any]] = None , **UpperCamelCase__ : Dict , ): A__ : List[str] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token A__ : Tuple ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=UpperCamelCase__ , remove_space=UpperCamelCase__ , keep_accents=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , ) A__ : Dict =3 A__ : int =do_lower_case A__ : str =remove_space A__ : Optional[Any] =keep_accents A__ : int =vocab_file A__ : Dict 
=spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCamelCase__ ) try: import jieba except ModuleNotFoundError as error: raise error.__class__( "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. " "See https://pypi.org/project/jieba/ for installation." ) A__ : Union[str, Any] =jieba A__ : List[str] =str.maketrans(" \n" , "\u2582\u2583" ) @property # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size def _UpperCAmelCase ( self : Union[str, Any] ): return len(self.sp_model ) def _UpperCAmelCase ( self : Optional[int] ): A__ : Any ={self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : List[str] ): A__ : Union[str, Any] =self.__dict__.copy() A__ : Tuple =None return state def __setstate__( self : Tuple , UpperCamelCase__ : int ): A__ : Union[str, Any] =d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): A__ : Optional[int] ={} A__ : Union[str, Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : Dict ): if self.remove_space: A__ : Optional[int] =" ".join(inputs.strip().split() ) else: A__ : Optional[Any] =inputs A__ : Any =outputs.replace("``" , "\"" ).replace("''" , "\"" ) if not self.keep_accents: A__ : Optional[Any] =unicodedata.normalize("NFKD" , UpperCamelCase__ ) A__ : Tuple ="".join([c for c in outputs if not unicodedata.combining(UpperCamelCase__ )] ) if self.do_lower_case: A__ : str =outputs.lower() return outputs def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : str ): A__ : Optional[int] =self.preprocess_text(UpperCamelCase__ ) A__ : Dict =self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ ) A__ : List[str] =[] for piece in pieces: if len(UpperCamelCase__ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit(): A__ : str 
=self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCamelCase__ , "" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: A__ : Union[str, Any] =cur_pieces[1:] else: A__ : List[str] =cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(UpperCamelCase__ ) else: new_pieces.append(UpperCamelCase__ ) return new_pieces def _UpperCAmelCase ( self : int , UpperCamelCase__ : str ): return self.sp_model.PieceToId(UpperCamelCase__ ) def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : List[Any] ): return self.sp_model.IdToPiece(UpperCamelCase__ ) def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : str ): A__ : Optional[int] ="".join(UpperCamelCase__ ).replace(UpperCamelCase__ , " " ).strip() return out_string def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ): A__ : List[str] =[self.sep_token_id] A__ : str =[self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ ) if token_ids_a is not None: return ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) + [1, 1] return ([0] * len(UpperCamelCase__ )) + [1, 1] def _UpperCAmelCase ( self : int , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ): A__ : List[str] =[self.sep_token_id] A__ : Optional[Any] =[2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : str , 
UpperCamelCase__ : Optional[str] = None ): if not os.path.isdir(UpperCamelCase__ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return A__ : Tuple =os.path.join( UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCamelCase__ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCamelCase__ , "wb" ) as fi: A__ : Optional[Any] =self.sp_model.serialized_model_proto() fi.write(UpperCamelCase__ ) return (out_vocab_file,) def _UpperCAmelCase ( self : str , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : int ): A__ : List[Any] =super()._decode(*UpperCamelCase__ , **UpperCamelCase__ ) A__ : Union[str, Any] =text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" ) return text
656
0
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self , _lowercase , _lowercase=7 , _lowercase=3 , _lowercase=1_8 , _lowercase=3_0 , _lowercase=4_0_0 , _lowercase=True , _lowercase=None , _lowercase=True , _lowercase=None , _lowercase=True , _lowercase=[0.5, 0.5, 0.5] , _lowercase=[0.5, 0.5, 0.5] , ) -> Tuple: '''simple docstring''' snake_case_ : Any = size if size is not None else {"""shortest_edge""": 1_8} snake_case_ : str = crop_size if crop_size is not None else {"""height""": 1_8, """width""": 1_8} snake_case_ : List[str] = parent snake_case_ : Optional[Any] = batch_size snake_case_ : List[Any] = num_channels snake_case_ : Any = image_size snake_case_ : int = min_resolution snake_case_ : Tuple = max_resolution snake_case_ : Tuple = do_resize snake_case_ : int = size snake_case_ : Union[str, Any] = do_center_crop snake_case_ : Union[str, Any] = crop_size snake_case_ : Optional[int] = do_normalize snake_case_ : List[str] = image_mean snake_case_ : Optional[Any] = image_std def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "do_center_crop": self.do_center_crop, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ): """simple docstring""" _lowerCamelCase = LevitImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self ) -> Dict: '''simple docstring''' 
snake_case_ : Dict = LevitImageProcessingTester(self ) @property def UpperCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowercase , """image_mean""" ) ) self.assertTrue(hasattr(_lowercase , """image_std""" ) ) self.assertTrue(hasattr(_lowercase , """do_normalize""" ) ) self.assertTrue(hasattr(_lowercase , """do_resize""" ) ) self.assertTrue(hasattr(_lowercase , """do_center_crop""" ) ) self.assertTrue(hasattr(_lowercase , """size""" ) ) def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' snake_case_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 1_8} ) self.assertEqual(image_processor.crop_size , {"""height""": 1_8, """width""": 1_8} ) snake_case_ : int = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 4_2} ) self.assertEqual(image_processor.crop_size , {"""height""": 8_4, """width""": 8_4} ) def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' pass def UpperCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase ) for image in image_inputs: self.assertIsInstance(_lowercase , Image.Image ) # Test not batched input snake_case_ : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], 
self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched snake_case_ : str = image_processing(_lowercase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' snake_case_ : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase ) for image in image_inputs: self.assertIsInstance(_lowercase , np.ndarray ) # Test not batched input snake_case_ : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched snake_case_ : Dict = image_processing(_lowercase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def UpperCAmelCase__ ( self ) -> List[str]: '''simple docstring''' snake_case_ : int = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase ) for image in image_inputs: self.assertIsInstance(_lowercase , torch.Tensor ) # Test not batched input snake_case_ : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, 
self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched snake_case_ : int = image_processing(_lowercase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
58
"""simple docstring""" def lowercase ( UpperCamelCase : int , UpperCamelCase : list[int] , UpperCamelCase : int ): """simple docstring""" def count_of_possible_combinations(UpperCamelCase : int ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(UpperCamelCase ) def lowercase ( UpperCamelCase : int , UpperCamelCase : list[int] , UpperCamelCase : int ): """simple docstring""" def count_of_possible_combinations_with_dp_array( UpperCamelCase : int , UpperCamelCase : list[int] ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] A__ : str =sum( count_of_possible_combinations_with_dp_array(target - item , UpperCamelCase ) for item in array ) A__ : List[str] =answer return answer A__ : List[Any] =[-1] * (target + 1) return count_of_possible_combinations_with_dp_array(UpperCamelCase , UpperCamelCase ) def lowercase ( UpperCamelCase : int , UpperCamelCase : list[int] , UpperCamelCase : int ): """simple docstring""" A__ : str =[0] * (target + 1) A__ : Optional[Any] =1 for i in range(1 , target + 1 ): for j in range(UpperCamelCase ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() __A : Optional[Any] = 3 __A : Optional[Any] = 5 __A : int = [1, 2, 5] print(combination_sum_iv(n, array, target))
656
0
import os import unittest from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer from transformers.testing_utils import require_jieba, tooslow from ...test_tokenization_common import TokenizerTesterMixin @require_jieba class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' lowercase_ = CpmAntTokenizer lowercase_ = False def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->List[str]: '''simple docstring''' super().setUp() lowerCamelCase__: Optional[int] =[ "<d>", "</d>", "<s>", "</s>", "</_>", "<unk>", "<pad>", "</n>", "我", "是", "C", "P", "M", "A", "n", "t", ] lowerCamelCase__: Any =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) @tooslow def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Optional[int]: '''simple docstring''' lowerCamelCase__: List[Any] =CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b") lowerCamelCase__: Union[str, Any] ="今天天气真好!" lowerCamelCase__: int =["今天", "天气", "真", "好", "!"] lowerCamelCase__: List[str] =tokenizer.tokenize(UpperCAmelCase_) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: Dict ="今天天气真好!" lowerCamelCase__: str =[tokenizer.bos_token] + tokens lowerCamelCase__: Union[str, Any] =[6, 9_802, 14_962, 2_082, 831, 244] self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , UpperCAmelCase_) lowerCamelCase__: Tuple =tokenizer.decode(UpperCAmelCase_) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_)
59
"""simple docstring""" import math import tensorflow as tf from packaging import version def lowercase ( UpperCamelCase : Optional[Any] ): """simple docstring""" A__ : List[Any] =tf.convert_to_tensor(UpperCamelCase ) A__ : List[Any] =0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) )) return x * cdf def lowercase ( UpperCamelCase : Optional[int] ): """simple docstring""" A__ : Optional[Any] =tf.convert_to_tensor(UpperCamelCase ) A__ : Tuple =tf.cast(math.pi , x.dtype ) A__ : Dict =tf.cast(0.04_47_15 , x.dtype ) A__ : Optional[int] =0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(UpperCamelCase , 3 )) )) return x * cdf def lowercase ( UpperCamelCase : Optional[int] ): """simple docstring""" A__ : List[str] =tf.convert_to_tensor(UpperCamelCase ) return x * tf.tanh(tf.math.softplus(UpperCamelCase ) ) def lowercase ( UpperCamelCase : List[str] ): """simple docstring""" A__ : Union[str, Any] =tf.convert_to_tensor(UpperCamelCase ) A__ : List[Any] =tf.cast(0.04_47_15 , x.dtype ) A__ : List[Any] =tf.cast(0.79_78_84_56_08 , x.dtype ) return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) )) def lowercase ( UpperCamelCase : List[Any] ): """simple docstring""" A__ : List[str] =tf.convert_to_tensor(UpperCamelCase ) A__ : str =tf.cast(1.7_02 , x.dtype ) return x * tf.math.sigmoid(coeff * x ) def lowercase ( UpperCamelCase : Tuple ): """simple docstring""" return tf.clip_by_value(_gelu(UpperCamelCase ) , -10 , 10 ) def lowercase ( UpperCamelCase : str , UpperCamelCase : Any=-1 ): """simple docstring""" A__ , A__ : Optional[Any] =tf.split(UpperCamelCase , 2 , axis=UpperCamelCase ) return a * tf.math.sigmoid(UpperCamelCase ) if version.parse(tf.version.VERSION) >= version.parse("2.4"): def lowercase ( UpperCamelCase : int ): """simple docstring""" return tf.keras.activations.gelu(UpperCamelCase , approximate=UpperCamelCase ) __A : Optional[Any] = tf.keras.activations.gelu __A : Optional[Any] = approximate_gelu_wrap else: __A : Any = _gelu 
__A : Union[str, Any] = _gelu_new __A : List[str] = { "gelu": gelu, "gelu_10": gelu_aa, "gelu_fast": gelu_fast, "gelu_new": gelu_new, "glu": glu, "mish": mish, "quick_gelu": quick_gelu, "relu": tf.keras.activations.relu, "sigmoid": tf.keras.activations.sigmoid, "silu": tf.keras.activations.swish, "swish": tf.keras.activations.swish, "tanh": tf.keras.activations.tanh, } def lowercase ( UpperCamelCase : List[Any] ): """simple docstring""" if activation_string in ACTaFN: return ACTaFN[activation_string] else: raise KeyError(F'''function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}''' )
656
0
import tensorflow as tf from ...tf_utils import shape_list class __lowerCAmelCase ( tf.keras.layers.Layer ): def __init__(self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=1 , __magic_name__=False , **__magic_name__ ) -> Dict: '''simple docstring''' super().__init__(**__magic_name__ ) snake_case_ : List[Any] = vocab_size snake_case_ : Dict = d_embed snake_case_ : Union[str, Any] = d_proj snake_case_ : str = cutoffs + [vocab_size] snake_case_ : int = [0] + self.cutoffs snake_case_ : Optional[int] = div_val snake_case_ : int = self.cutoffs[0] snake_case_ : Any = len(self.cutoffs ) - 1 snake_case_ : Union[str, Any] = self.shortlist_size + self.n_clusters snake_case_ : str = keep_order snake_case_ : int = [] snake_case_ : Union[str, Any] = [] def lowerCamelCase (self , __magic_name__ ) -> Union[str, Any]: '''simple docstring''' if self.n_clusters > 0: snake_case_ : Tuple = self.add_weight( shape=(self.n_clusters, self.d_embed) , initializer='''zeros''' , trainable=__magic_name__ , name='''cluster_weight''' ) snake_case_ : Optional[Any] = self.add_weight( shape=(self.n_clusters,) , initializer='''zeros''' , trainable=__magic_name__ , name='''cluster_bias''' ) if self.div_val == 1: for i in range(len(self.cutoffs ) ): if self.d_proj != self.d_embed: snake_case_ : List[str] = self.add_weight( shape=(self.d_embed, self.d_proj) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_projs_._{i}''' , ) self.out_projs.append(__magic_name__ ) else: self.out_projs.append(__magic_name__ ) snake_case_ : Optional[Any] = self.add_weight( shape=(self.vocab_size, self.d_embed) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_layers_._{i}_._weight''' , ) snake_case_ : List[str] = self.add_weight( shape=(self.vocab_size,) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_layers_._{i}_._bias''' , ) self.out_layers.append((weight, bias) ) else: for i in range(len(self.cutoffs ) ): snake_case_ , 
snake_case_ : Optional[int] = self.cutoff_ends[i], self.cutoff_ends[i + 1] snake_case_ : Optional[Any] = self.d_embed // (self.div_val**i) snake_case_ : int = self.add_weight( shape=(d_emb_i, self.d_proj) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_projs_._{i}''' ) self.out_projs.append(__magic_name__ ) snake_case_ : int = self.add_weight( shape=(r_idx - l_idx, d_emb_i) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_layers_._{i}_._weight''' , ) snake_case_ : Any = self.add_weight( shape=(r_idx - l_idx,) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_layers_._{i}_._bias''' , ) self.out_layers.append((weight, bias) ) super().build(__magic_name__ ) @staticmethod def lowerCamelCase (__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None ) -> str: '''simple docstring''' snake_case_ : Union[str, Any] = x if proj is not None: snake_case_ : List[str] = tf.einsum('''ibd,ed->ibe''' , __magic_name__ , __magic_name__ ) return tf.einsum('''ibd,nd->ibn''' , __magic_name__ , __magic_name__ ) + b @staticmethod def lowerCamelCase (__magic_name__ , __magic_name__ ) -> Any: '''simple docstring''' snake_case_ : Union[str, Any] = shape_list(__magic_name__ ) snake_case_ : Tuple = tf.range(lp_size[0] , dtype=target.dtype ) snake_case_ : Dict = tf.stack([r, target] , 1 ) return tf.gather_nd(__magic_name__ , __magic_name__ ) def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__=True , __magic_name__=False ) -> str: '''simple docstring''' snake_case_ : Optional[Any] = 0 if self.n_clusters == 0: snake_case_ : Any = self._logit(__magic_name__ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] ) if target is not None: snake_case_ : Union[str, Any] = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=__magic_name__ , logits=__magic_name__ ) snake_case_ : Optional[Any] = tf.nn.log_softmax(__magic_name__ , axis=-1 ) else: snake_case_ : Optional[int] = 
shape_list(__magic_name__ ) snake_case_ : int = [] snake_case_ : List[Any] = tf.zeros(hidden_sizes[:2] ) for i in range(len(self.cutoffs ) ): snake_case_ , snake_case_ : Optional[int] = self.cutoff_ends[i], self.cutoff_ends[i + 1] if target is not None: snake_case_ : str = (target >= l_idx) & (target < r_idx) snake_case_ : Dict = tf.where(__magic_name__ ) snake_case_ : List[str] = tf.boolean_mask(__magic_name__ , __magic_name__ ) - l_idx if self.div_val == 1: snake_case_ : Any = self.out_layers[0][0][l_idx:r_idx] snake_case_ : Dict = self.out_layers[0][1][l_idx:r_idx] else: snake_case_ : Union[str, Any] = self.out_layers[i][0] snake_case_ : int = self.out_layers[i][1] if i == 0: snake_case_ : List[Any] = tf.concat([cur_W, self.cluster_weight] , 0 ) snake_case_ : Tuple = tf.concat([cur_b, self.cluster_bias] , 0 ) snake_case_ : Optional[int] = self._logit(__magic_name__ , __magic_name__ , __magic_name__ , self.out_projs[0] ) snake_case_ : Any = tf.nn.log_softmax(__magic_name__ ) out.append(head_logprob[..., : self.cutoffs[0]] ) if target is not None: snake_case_ : Optional[Any] = tf.boolean_mask(__magic_name__ , __magic_name__ ) snake_case_ : Tuple = self._gather_logprob(__magic_name__ , __magic_name__ ) else: snake_case_ : Optional[int] = self._logit(__magic_name__ , __magic_name__ , __magic_name__ , self.out_projs[i] ) snake_case_ : Union[str, Any] = tf.nn.log_softmax(__magic_name__ ) snake_case_ : Optional[Any] = self.cutoffs[0] + i - 1 # No probability for the head cluster snake_case_ : Optional[int] = head_logprob[..., cluster_prob_idx, None] + tail_logprob out.append(__magic_name__ ) if target is not None: snake_case_ : Any = tf.boolean_mask(__magic_name__ , __magic_name__ ) snake_case_ : Optional[Any] = tf.boolean_mask(__magic_name__ , __magic_name__ ) snake_case_ : str = self._gather_logprob(__magic_name__ , __magic_name__ ) cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1] if target is not None: loss += tf.scatter_nd(__magic_name__ , 
-cur_logprob , shape_list(__magic_name__ ) ) snake_case_ : str = tf.concat(__magic_name__ , axis=-1 ) if target is not None: if return_mean: snake_case_ : int = tf.reduce_mean(__magic_name__ ) # Add the training-time loss value to the layer using `self.add_loss()`. self.add_loss(__magic_name__ ) # Log the loss as a metric (we could log arbitrary metrics, # including different metrics for training and inference. self.add_metric(__magic_name__ , name=self.name , aggregation='''mean''' if return_mean else '''''' ) return out
60
"""simple docstring""" import inspect import unittest from transformers import SegformerConfig, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_MAPPING, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerModel, ) from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import SegformerImageProcessor class __lowerCAmelCase ( _UpperCamelCase): '''simple docstring''' def _UpperCAmelCase ( self : Dict ): A__ : Optional[Any] =self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(UpperCamelCase__ , "hidden_sizes" ) ) self.parent.assertTrue(hasattr(UpperCamelCase__ , "num_attention_heads" ) ) self.parent.assertTrue(hasattr(UpperCamelCase__ , "num_encoder_blocks" ) ) class __lowerCAmelCase : '''simple docstring''' def __init__( self : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any]=13 , UpperCamelCase__ : Tuple=64 , UpperCamelCase__ : Optional[int]=3 , UpperCamelCase__ : Union[str, Any]=4 , UpperCamelCase__ : Dict=[2, 2, 2, 2] , UpperCamelCase__ : Union[str, Any]=[8, 4, 2, 1] , UpperCamelCase__ : Tuple=[16, 32, 64, 128] , UpperCamelCase__ : Optional[int]=[1, 4, 8, 16] , UpperCamelCase__ : Any=[1, 2, 4, 8] , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Dict="gelu" , UpperCamelCase__ : str=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : List[str]=0.02 , UpperCamelCase__ : int=3 , UpperCamelCase__ : Optional[Any]=None , ): A__ : Tuple =parent A__ : List[Any] =batch_size A__ : 
List[Any] =image_size A__ : Union[str, Any] =num_channels A__ : Optional[int] =num_encoder_blocks A__ : Any =sr_ratios A__ : Any =depths A__ : List[Any] =hidden_sizes A__ : List[Any] =downsampling_rates A__ : List[str] =num_attention_heads A__ : int =is_training A__ : List[Any] =use_labels A__ : Any =hidden_act A__ : Dict =hidden_dropout_prob A__ : int =attention_probs_dropout_prob A__ : List[Any] =initializer_range A__ : Tuple =num_labels A__ : List[Any] =scope def _UpperCAmelCase ( self : Optional[int] ): A__ : List[str] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A__ : Any =None if self.use_labels: A__ : Tuple =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) A__ : List[Any] =self.get_config() return config, pixel_values, labels def _UpperCAmelCase ( self : Tuple ): return SegformerConfig( image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int ): A__ : Any =SegformerModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() A__ : Dict =model(UpperCamelCase__ ) A__ : Optional[int] =self.image_size // (self.downsampling_rates[-1] * 2) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) ) def _UpperCAmelCase ( self : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] ): A__ : str =self.num_labels A__ : Optional[Any] =SegformerForSemanticSegmentation(UpperCamelCase__ ) model.to(UpperCamelCase__ ) 
model.eval() A__ : Optional[Any] =model(UpperCamelCase__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) A__ : List[Any] =model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) self.parent.assertGreater(result.loss , 0.0 ) def _UpperCAmelCase ( self : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str ): A__ : Tuple =1 A__ : Tuple =SegformerForSemanticSegmentation(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() A__ : List[str] =torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(UpperCamelCase__ ) A__ : Dict =model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertGreater(result.loss , 0.0 ) def _UpperCAmelCase ( self : str ): A__ : Union[str, Any] =self.prepare_config_and_inputs() A__ , A__ , A__ : Tuple =config_and_inputs A__ : Tuple ={"pixel_values": pixel_values} return config, inputs_dict @require_torch class __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase): '''simple docstring''' __magic_name__ : Dict = ( ( SegformerModel, SegformerForSemanticSegmentation, SegformerForImageClassification, ) if is_torch_available() else () ) __magic_name__ : Optional[int] = ( { """feature-extraction""": SegformerModel, """image-classification""": SegformerForImageClassification, """image-segmentation""": SegformerForSemanticSegmentation, } if is_torch_available() else {} ) __magic_name__ : Dict = True __magic_name__ : List[str] = False __magic_name__ : Optional[Any] = False __magic_name__ : str = False def _UpperCAmelCase ( self : Union[str, Any] ): A__ : Union[str, Any] =SegformerModelTester(self ) A__ : Tuple =SegformerConfigTester(self , config_class=UpperCamelCase__ ) def _UpperCAmelCase ( self : str ): self.config_tester.run_common_tests() def 
_UpperCAmelCase ( self : Dict ): A__ : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def _UpperCAmelCase ( self : Tuple ): A__ : int =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_binary_image_segmentation(*UpperCamelCase__ ) def _UpperCAmelCase ( self : Union[str, Any] ): A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_segmentation(*UpperCamelCase__ ) @unittest.skip("SegFormer does not use inputs_embeds" ) def _UpperCAmelCase ( self : Dict ): pass @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods" ) def _UpperCAmelCase ( self : Tuple ): pass def _UpperCAmelCase ( self : List[str] ): A__ , A__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ : int =model_class(UpperCamelCase__ ) A__ : Optional[int] =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ : Optional[int] =[*signature.parameters.keys()] A__ : List[str] =["pixel_values"] self.assertListEqual(arg_names[:1] , UpperCamelCase__ ) def _UpperCAmelCase ( self : str ): A__ , A__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common() A__ : Union[str, Any] =True for model_class in self.all_model_classes: A__ : Optional[Any] =True A__ : Union[str, Any] =False A__ : str =True A__ : Optional[int] =model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() with torch.no_grad(): A__ : str =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) A__ : Any =outputs.attentions A__ : List[str] =sum(self.model_tester.depths ) self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ ) # check that output_attentions also work using config del inputs_dict["output_attentions"] A__ : Dict =True A__ : str =model_class(UpperCamelCase__ ) 
model.to(UpperCamelCase__ ) model.eval() with torch.no_grad(): A__ : Any =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) A__ : Union[str, Any] =outputs.attentions self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ ) # verify the first attentions (first block, first layer) A__ : List[Any] =(self.model_tester.image_size // 4) ** 2 A__ : Tuple =(self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) # verify the last attentions (last block, last layer) A__ : Tuple =(self.model_tester.image_size // 32) ** 2 A__ : Optional[Any] =(self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2 self.assertListEqual( list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , ) A__ : int =len(UpperCamelCase__ ) # Check attention is always last and order is fine A__ : Optional[Any] =True A__ : Any =True A__ : Union[str, Any] =model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() with torch.no_grad(): A__ : Optional[Any] =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) self.assertEqual(out_len + 1 , len(UpperCamelCase__ ) ) A__ : Optional[Any] =outputs.attentions self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ ) # verify the first attentions (first block, first layer) A__ : Union[str, Any] =(self.model_tester.image_size // 4) ** 2 A__ : Tuple =(self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) def _UpperCAmelCase ( self : List[Any] ): def check_hidden_states_output(UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple ): A__ : Optional[Any] 
=model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() with torch.no_grad(): A__ : List[Any] =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) A__ : Optional[Any] =outputs.hidden_states A__ : int =self.model_tester.num_encoder_blocks self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.hidden_sizes[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) A__ , A__ : List[str] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ : Optional[Any] =True check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A__ : str =True check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def _UpperCAmelCase ( self : Optional[int] ): if not self.model_tester.is_training: return A__ , A__ : int =self.model_tester.prepare_config_and_inputs_for_common() A__ : List[Any] =True for model_class in self.all_model_classes: if model_class in get_values(UpperCamelCase__ ): continue A__ : List[Any] =model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.train() A__ : int =self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) A__ : Union[str, Any] =model(**UpperCamelCase__ ).loss loss.backward() @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." 
) def _UpperCAmelCase ( self : Tuple ): pass @slow def _UpperCAmelCase ( self : Tuple ): for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ : Tuple =SegformerModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def lowercase ( ): """simple docstring""" A__ : List[Any] =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch class __lowerCAmelCase ( unittest.TestCase): '''simple docstring''' @slow def _UpperCAmelCase ( self : Tuple ): # only resize + normalize A__ : List[Any] =SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=UpperCamelCase__ , align=UpperCamelCase__ , do_random_crop=UpperCamelCase__ ) A__ : Union[str, Any] =SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to( UpperCamelCase__ ) A__ : Union[str, Any] =prepare_img() A__ : Union[str, Any] =image_processor(images=UpperCamelCase__ , return_tensors="pt" ) A__ : int =encoded_inputs.pixel_values.to(UpperCamelCase__ ) with torch.no_grad(): A__ : int =model(UpperCamelCase__ ) A__ : Dict =torch.Size((1, model.config.num_labels, 128, 128) ) self.assertEqual(outputs.logits.shape , UpperCamelCase__ ) A__ : Optional[int] =torch.tensor( [ [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]], [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]], [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]], ] ).to(UpperCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1E-4 ) ) @slow def _UpperCAmelCase ( self : Union[str, Any] ): # only resize + normalize A__ : Dict =SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=UpperCamelCase__ , align=UpperCamelCase__ , do_random_crop=UpperCamelCase__ ) A__ : int =SegformerForSemanticSegmentation.from_pretrained( "nvidia/segformer-b1-finetuned-cityscapes-1024-1024" 
).to(UpperCamelCase__ ) A__ : Tuple =prepare_img() A__ : str =image_processor(images=UpperCamelCase__ , return_tensors="pt" ) A__ : Optional[int] =encoded_inputs.pixel_values.to(UpperCamelCase__ ) with torch.no_grad(): A__ : int =model(UpperCamelCase__ ) A__ : List[str] =torch.Size((1, model.config.num_labels, 128, 128) ) self.assertEqual(outputs.logits.shape , UpperCamelCase__ ) A__ : List[Any] =torch.tensor( [ [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]], [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]], [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]], ] ).to(UpperCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1E-1 ) ) @slow def _UpperCAmelCase ( self : int ): # only resize + normalize A__ : Optional[Any] =SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=UpperCamelCase__ , align=UpperCamelCase__ , do_random_crop=UpperCamelCase__ ) A__ : List[Any] =SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to( UpperCamelCase__ ) A__ : str =prepare_img() A__ : Dict =image_processor(images=UpperCamelCase__ , return_tensors="pt" ) A__ : Any =encoded_inputs.pixel_values.to(UpperCamelCase__ ) with torch.no_grad(): A__ : Dict =model(UpperCamelCase__ ) A__ : Any =outputs.logits.detach().cpu() A__ : Union[str, Any] =image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ , target_sizes=[(500, 300)] ) A__ : List[str] =torch.Size((500, 300) ) self.assertEqual(segmentation[0].shape , UpperCamelCase__ ) A__ : int =image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ ) A__ : Tuple =torch.Size((128, 128) ) self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
656
0
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional

import pyarrow as pa
import pyarrow.json as paj

import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline


logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for the JSON loader."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    """Arrow-based builder that reads JSON / JSON-lines files into Arrow tables."""

    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        """Validate deprecated config options and return the dataset info."""
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            # `block_size` used to be the chunk size; keep backward compatibility.
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """Handle str, list and dict values in `data_files` and build one split per key."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            # A bare path or list of paths becomes a single TRAIN split.
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        """Cast an Arrow table to the requested features, adding missing columns as nulls."""
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                field_type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=field_type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        """Yield `(key, pa.Table)` pairs from JSON or JSON-lines files."""
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two formats: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)
            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line so a JSON record is never split across batches.
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            # Fall back to parsing the whole file as one JSON document.
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
61
"""simple docstring""" import unittest import numpy as np from transformers import RobertaPreLayerNormConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, ) class __lowerCAmelCase ( unittest.TestCase): '''simple docstring''' def __init__( self : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any]=13 , UpperCamelCase__ : Optional[int]=7 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : List[str]=99 , UpperCamelCase__ : Optional[Any]=32 , UpperCamelCase__ : Any=5 , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : Union[str, Any]=37 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Optional[Any]=512 , UpperCamelCase__ : Union[str, Any]=16 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : List[str]=0.02 , UpperCamelCase__ : List[Any]=4 , ): A__ : str =parent A__ : List[str] =batch_size A__ : Any =seq_length A__ : List[str] =is_training A__ : List[Any] =use_attention_mask A__ : List[Any] =use_token_type_ids A__ : Dict =use_labels A__ : List[Any] =vocab_size A__ : Optional[int] =hidden_size A__ : Optional[Any] =num_hidden_layers A__ : str =num_attention_heads A__ : int =intermediate_size A__ : Tuple =hidden_act A__ : Tuple =hidden_dropout_prob A__ : Dict =attention_probs_dropout_prob A__ : Any 
=max_position_embeddings A__ : Any =type_vocab_size A__ : Union[str, Any] =type_sequence_label_size A__ : Optional[Any] =initializer_range A__ : int =num_choices def _UpperCAmelCase ( self : Tuple ): A__ : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ : List[str] =None if self.use_attention_mask: A__ : Optional[int] =random_attention_mask([self.batch_size, self.seq_length] ) A__ : str =None if self.use_token_type_ids: A__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A__ : Any =RobertaPreLayerNormConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _UpperCAmelCase ( self : Tuple ): A__ : Dict =self.prepare_config_and_inputs() A__ , A__ , A__ , A__ : str =config_and_inputs A__ : Optional[Any] ={"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict def _UpperCAmelCase ( self : int ): A__ : str =self.prepare_config_and_inputs() A__ , A__ , A__ , A__ : Union[str, Any] =config_and_inputs A__ : Union[str, Any] =True A__ : List[Any] =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) A__ : Tuple =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax # Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with 
ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40 class __lowerCAmelCase ( _UpperCamelCase , unittest.TestCase): '''simple docstring''' __magic_name__ : Union[str, Any] = True __magic_name__ : Dict = ( ( FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, ) if is_flax_available() else () ) def _UpperCAmelCase ( self : Optional[int] ): A__ : Optional[int] =FlaxRobertaPreLayerNormModelTester(self ) @slow def _UpperCAmelCase ( self : List[Any] ): for model_class_name in self.all_model_classes: A__ : Tuple =model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=UpperCamelCase__ ) A__ : Union[str, Any] =model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCamelCase__ ) @require_flax class __lowerCAmelCase ( unittest.TestCase): '''simple docstring''' @slow def _UpperCAmelCase ( self : Tuple ): A__ : Any =FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=UpperCamelCase__ ) A__ : Tuple =np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] , dtype=jnp.intaa ) A__ : str =model(UpperCamelCase__ )[0] A__ : List[Any] =[1, 11, 50265] self.assertEqual(list(output.shape ) , UpperCamelCase__ ) # compare the actual values for a slice. 
A__ : Any =np.array( [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) ) @slow def _UpperCAmelCase ( self : List[Any] ): A__ : Union[str, Any] =FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=UpperCamelCase__ ) A__ : List[Any] =np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] , dtype=jnp.intaa ) A__ : Dict =model(UpperCamelCase__ )[0] # compare the actual values for a slice. A__ : Optional[Any] =np.array( [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )
656
0
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}


class UniSpeechConfig(PretrainedConfig):
    """Configuration class storing the architecture of a UniSpeech model.

    Defaults yield a configuration similar to the base UniSpeech architecture;
    see the archive map above for released checkpoints.
    """

    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        # The three conv specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        # Overall stride of the convolutional feature encoder (product of all conv strides).
        return functools.reduce(operator.mul, self.conv_stride, 1)
62
"""simple docstring""" import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() __A : List[Any] = logging.get_logger(__name__) __A : Any = [ ("bert.bert", "visual_bert"), ("bert.cls", "cls"), ("bert.classifier", "cls"), ("token_type_embeddings_visual", "visual_token_type_embeddings"), ("position_embeddings_visual", "visual_position_embeddings"), ("projection", "visual_projection"), ] __A : Optional[int] = [ "nlvr2_coco_pre_trained.th", "nlvr2_fine_tuned.th", "nlvr2_pre_trained.th", "vcr_coco_pre_train.th", "vcr_fine_tune.th", "vcr_pre_train.th", "vqa_coco_pre_trained.th", "vqa_fine_tuned.th", "vqa_pre_trained.th", ] def lowercase ( UpperCamelCase : Tuple ): """simple docstring""" A__ : Union[str, Any] =torch.load(UpperCamelCase , map_location="cpu" ) return sd def lowercase ( UpperCamelCase : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : int=rename_keys_prefix ): """simple docstring""" A__ : List[str] =OrderedDict() A__ : str =torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue A__ : Optional[Any] =key for name_pair in rename_keys_prefix: A__ : int =new_key.replace(name_pair[0] , name_pair[1] ) A__ : Dict =d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately A__ : Optional[int] =new_d["cls.predictions.bias"] return new_d @torch.no_grad() def lowercase ( UpperCamelCase : Dict , UpperCamelCase : List[str] ): """simple docstring""" assert ( checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS ), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.''' # Get Config if "pre" in 
checkpoint_path: A__ : Any ="pretraining" if "vcr" in checkpoint_path: A__ : Union[str, Any] ={"visual_embedding_dim": 512} elif "vqa_advanced" in checkpoint_path: A__ : Optional[Any] ={"visual_embedding_dim": 2048} elif "vqa" in checkpoint_path: A__ : Optional[int] ={"visual_embedding_dim": 2048} elif "nlvr" in checkpoint_path: A__ : List[str] ={"visual_embedding_dim": 1024} else: raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''' ) else: if "vcr" in checkpoint_path: A__ : Optional[int] ={"visual_embedding_dim": 512} A__ : List[str] ="multichoice" elif "vqa_advanced" in checkpoint_path: A__ : Any ={"visual_embedding_dim": 2048} A__ : str ="vqa_advanced" elif "vqa" in checkpoint_path: A__ : Optional[int] ={"visual_embedding_dim": 2048, "num_labels": 3129} A__ : str ="vqa" elif "nlvr" in checkpoint_path: A__ : str ={ "visual_embedding_dim": 1024, "num_labels": 2, } A__ : Dict ="nlvr" A__ : Union[str, Any] =VisualBertConfig(**UpperCamelCase ) # Load State Dict A__ : int =load_state_dict(UpperCamelCase ) A__ : Tuple =get_new_dict(UpperCamelCase , UpperCamelCase ) if model_type == "pretraining": A__ : str =VisualBertForPreTraining(UpperCamelCase ) elif model_type == "vqa": A__ : Optional[int] =VisualBertForQuestionAnswering(UpperCamelCase ) elif model_type == "nlvr": A__ : Union[str, Any] =VisualBertForVisualReasoning(UpperCamelCase ) elif model_type == "multichoice": A__ : Union[str, Any] =VisualBertForMultipleChoice(UpperCamelCase ) model.load_state_dict(UpperCamelCase ) # Save Checkpoints Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase ) model.save_pretrained(UpperCamelCase ) if __name__ == "__main__": __A : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.") parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.") __A : str = parser.parse_args() 
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
656
0
from __future__ import annotations a : str = "Muhammad Umer Farooq" a : Tuple = "MIT" a : int = "1.0.0" a : List[str] = "Muhammad Umer Farooq" a : Union[str, Any] = "contact@muhammadumerfarooq.me" a : Tuple = "Alpha" import re from html.parser import HTMLParser from urllib import parse import requests class a ( lowercase__ ): """simple docstring""" def __init__( self : List[Any] , __lowercase : str ) -> None: super().__init__() __UpperCAmelCase : list[str] = [] __UpperCAmelCase : List[Any] = domain def UpperCAmelCase ( self : Tuple , __lowercase : str , __lowercase : list[tuple[str, str | None]] ) -> None: # Only parse the 'anchor' tag. if tag == "a": # Check the list of defined attributes. for name, value in attrs: # If href is defined, and not empty nor # print it. if name == "href" and value != "#" and value != "": # If not already in urls. if value not in self.urls: __UpperCAmelCase : Union[str, Any] = parse.urljoin(self.domain , __lowercase ) self.urls.append(__lowercase ) def lowerCamelCase__ ( __lowerCamelCase : str ): return ".".join(get_sub_domain_name(__lowerCamelCase ).split(""".""" )[-2:] ) def lowerCamelCase__ ( __lowerCamelCase : str ): return parse.urlparse(__lowerCamelCase ).netloc def lowerCamelCase__ ( __lowerCamelCase : str = "https://github.com" ): __UpperCAmelCase : Union[str, Any] = get_domain_name(__lowerCamelCase ) # Initialize the parser __UpperCAmelCase : Tuple = Parser(__lowerCamelCase ) try: # Open URL __UpperCAmelCase : Dict = requests.get(__lowerCamelCase ) # pass the raw HTML to the parser to get links parser.feed(r.text ) # Get links and loop through __UpperCAmelCase : Tuple = set() for link in parser.urls: # open URL. # read = requests.get(link) try: __UpperCAmelCase : Tuple = requests.get(__lowerCamelCase ) # Get the valid email. __UpperCAmelCase : Optional[int] = re.findall("""[a-zA-Z0-9]+@""" + domain , read.text ) # If not in list then append it. 
for email in emails: valid_emails.add(__lowerCamelCase ) except ValueError: pass except ValueError: raise SystemExit(1 ) # Finally return a sorted list of email addresses with no duplicates. return sorted(__lowerCamelCase ) if __name__ == "__main__": a : Any = emails_from_url("https://github.com") print(f"""{len(emails)} emails found:""") print("\n".join(sorted(emails)))
63
"""simple docstring""" __A : Union[str, Any] = {str(digit): digit**5 for digit in range(10)} def lowercase ( UpperCamelCase : int ): """simple docstring""" return sum(DIGITS_FIFTH_POWER[digit] for digit in str(UpperCamelCase ) ) def lowercase ( ): """simple docstring""" return sum( number for number in range(1000 , 1000000 ) if number == digits_fifth_powers_sum(UpperCamelCase ) ) if __name__ == "__main__": print(solution())
656
0
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/ import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetImgaImgPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): __a = StableDiffusionControlNetImgaImgPipeline __a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} __a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS __a = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} ) __a = IMAGE_TO_IMAGE_IMAGE_PARAMS def UpperCamelCase_ ( self ) -> str: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__: int= UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__: str= ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , 
cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__: str= DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__: List[str]= AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__: List[Any]= CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) SCREAMING_SNAKE_CASE__: List[str]= CLIPTextModel(lowerCAmelCase ) SCREAMING_SNAKE_CASE__: int= CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) SCREAMING_SNAKE_CASE__: Union[str, Any]= { '''unet''': unet, '''controlnet''': controlnet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase=0 ) -> Optional[Any]: if str(lowerCAmelCase ).startswith('''mps''' ): SCREAMING_SNAKE_CASE__: Optional[int]= torch.manual_seed(lowerCAmelCase ) else: SCREAMING_SNAKE_CASE__: Union[str, Any]= torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase ) SCREAMING_SNAKE_CASE__: int= 2 SCREAMING_SNAKE_CASE__: Tuple= randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowerCAmelCase , device=torch.device(lowerCAmelCase ) , ) SCREAMING_SNAKE_CASE__: int= floats_tensor(control_image.shape , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase ) SCREAMING_SNAKE_CASE__: Optional[int]= 
image.cpu().permute(0 , 2 , 3 , 1 )[0] SCREAMING_SNAKE_CASE__: str= Image.fromarray(np.uinta(lowerCAmelCase ) ).convert('''RGB''' ).resize((64, 64) ) SCREAMING_SNAKE_CASE__: Tuple= { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', '''image''': image, '''control_image''': control_image, } return inputs def UpperCamelCase_ ( self ) -> Tuple: return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def UpperCamelCase_ ( self ) -> Dict: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 ) def UpperCamelCase_ ( self ) -> str: self._test_inference_batch_single_identical(expected_max_diff=2e-3 ) class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): __a = StableDiffusionControlNetImgaImgPipeline __a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} __a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS __a = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def UpperCamelCase_ ( self ) -> Dict: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__: int= UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) torch.manual_seed(0 ) def init_weights(lowerCAmelCase ): if isinstance(lowerCAmelCase , torch.nn.Convad ): torch.nn.init.normal(m.weight ) m.bias.data.fill_(1.0 ) SCREAMING_SNAKE_CASE__: Any= ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , 
conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(lowerCAmelCase ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__: Tuple= ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(lowerCAmelCase ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__: Tuple= DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__: Tuple= AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__: Optional[int]= CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) SCREAMING_SNAKE_CASE__: Any= CLIPTextModel(lowerCAmelCase ) SCREAMING_SNAKE_CASE__: List[str]= CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) SCREAMING_SNAKE_CASE__: Dict= MultiControlNetModel([controlneta, controlneta] ) SCREAMING_SNAKE_CASE__: int= { '''unet''': unet, '''controlnet''': controlnet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase=0 ) -> List[Any]: if str(lowerCAmelCase ).startswith('''mps''' ): SCREAMING_SNAKE_CASE__: str= torch.manual_seed(lowerCAmelCase ) else: SCREAMING_SNAKE_CASE__: Optional[int]= torch.Generator(device=lowerCAmelCase 
).manual_seed(lowerCAmelCase ) SCREAMING_SNAKE_CASE__: Any= 2 SCREAMING_SNAKE_CASE__: Tuple= [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowerCAmelCase , device=torch.device(lowerCAmelCase ) , ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowerCAmelCase , device=torch.device(lowerCAmelCase ) , ), ] SCREAMING_SNAKE_CASE__: Union[str, Any]= floats_tensor(control_image[0].shape , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase ) SCREAMING_SNAKE_CASE__: Dict= image.cpu().permute(0 , 2 , 3 , 1 )[0] SCREAMING_SNAKE_CASE__: Union[str, Any]= Image.fromarray(np.uinta(lowerCAmelCase ) ).convert('''RGB''' ).resize((64, 64) ) SCREAMING_SNAKE_CASE__: int= { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', '''image''': image, '''control_image''': control_image, } return inputs def UpperCamelCase_ ( self ) -> List[Any]: SCREAMING_SNAKE_CASE__: List[Any]= self.get_dummy_components() SCREAMING_SNAKE_CASE__: str= self.pipeline_class(**lowerCAmelCase ) pipe.to(lowerCAmelCase ) SCREAMING_SNAKE_CASE__: List[Any]= 10.0 SCREAMING_SNAKE_CASE__: Any= 4 SCREAMING_SNAKE_CASE__: Optional[Any]= self.get_dummy_inputs(lowerCAmelCase ) SCREAMING_SNAKE_CASE__: int= steps SCREAMING_SNAKE_CASE__: int= scale SCREAMING_SNAKE_CASE__: List[Any]= pipe(**lowerCAmelCase )[0] SCREAMING_SNAKE_CASE__: Tuple= self.get_dummy_inputs(lowerCAmelCase ) SCREAMING_SNAKE_CASE__: Dict= steps SCREAMING_SNAKE_CASE__: List[Any]= scale SCREAMING_SNAKE_CASE__: int= pipe(**lowerCAmelCase , control_guidance_start=0.1 , control_guidance_end=0.2 )[0] SCREAMING_SNAKE_CASE__: Dict= self.get_dummy_inputs(lowerCAmelCase ) SCREAMING_SNAKE_CASE__: List[str]= steps SCREAMING_SNAKE_CASE__: List[Any]= scale SCREAMING_SNAKE_CASE__: str= pipe(**lowerCAmelCase , 
control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0] SCREAMING_SNAKE_CASE__: Optional[int]= self.get_dummy_inputs(lowerCAmelCase ) SCREAMING_SNAKE_CASE__: int= steps SCREAMING_SNAKE_CASE__: int= scale SCREAMING_SNAKE_CASE__: Any= pipe(**lowerCAmelCase , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0] # make sure that all outputs are different assert np.sum(np.abs(output_a - output_a ) ) > 1e-3 assert np.sum(np.abs(output_a - output_a ) ) > 1e-3 assert np.sum(np.abs(output_a - output_a ) ) > 1e-3 def UpperCamelCase_ ( self ) -> int: return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def UpperCamelCase_ ( self ) -> Dict: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 ) def UpperCamelCase_ ( self ) -> Union[str, Any]: self._test_inference_batch_single_identical(expected_max_diff=2e-3 ) def UpperCamelCase_ ( self ) -> Optional[Any]: SCREAMING_SNAKE_CASE__: Any= self.get_dummy_components() SCREAMING_SNAKE_CASE__: Union[str, Any]= self.pipeline_class(**lowerCAmelCase ) pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(lowerCAmelCase ) except NotImplementedError: pass @slow @require_torch_gpu class _lowerCamelCase ( unittest.TestCase ): def UpperCamelCase_ ( self ) -> Dict: super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase_ ( self ) -> Tuple: SCREAMING_SNAKE_CASE__: Optional[int]= ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''' ) SCREAMING_SNAKE_CASE__: Tuple= StableDiffusionControlNetImgaImgPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , safety_checker=lowerCAmelCase , controlnet=lowerCAmelCase ) 
pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=lowerCAmelCase ) SCREAMING_SNAKE_CASE__: Tuple= torch.Generator(device='''cpu''' ).manual_seed(0 ) SCREAMING_SNAKE_CASE__: List[Any]= '''evil space-punk bird''' SCREAMING_SNAKE_CASE__: List[str]= load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ).resize((512, 512) ) SCREAMING_SNAKE_CASE__: List[Any]= load_image( '''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''' ).resize((512, 512) ) SCREAMING_SNAKE_CASE__: Optional[Any]= pipe( lowerCAmelCase , lowerCAmelCase , control_image=lowerCAmelCase , generator=lowerCAmelCase , output_type='''np''' , num_inference_steps=50 , strength=0.6 , ) SCREAMING_SNAKE_CASE__: Union[str, Any]= output.images[0] assert image.shape == (512, 512, 3) SCREAMING_SNAKE_CASE__: str= load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''' ) assert np.abs(expected_image - image ).max() < 9e-2
64
"""simple docstring""" import collections.abc from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_poolformer import PoolFormerConfig __A : Optional[Any] = logging.get_logger(__name__) # General docstring __A : str = "PoolFormerConfig" # Base docstring __A : Optional[Any] = "sail/poolformer_s12" __A : List[Any] = [1, 512, 7, 7] # Image classification docstring __A : List[str] = "sail/poolformer_s12" __A : Tuple = "tabby, tabby cat" __A : Tuple = [ "sail/poolformer_s12", # See all PoolFormer models at https://huggingface.co/models?filter=poolformer ] def lowercase ( UpperCamelCase : Any , UpperCamelCase : float = 0.0 , UpperCamelCase : bool = False ): """simple docstring""" if drop_prob == 0.0 or not training: return input A__ : Tuple =1 - drop_prob A__ : List[str] =(input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets A__ : Any =keep_prob + torch.rand(UpperCamelCase , dtype=input.dtype , device=input.device ) random_tensor.floor_() # binarize A__ : Optional[int] =input.div(UpperCamelCase ) * random_tensor return output class __lowerCAmelCase ( nn.Module): '''simple docstring''' def __init__( self : Optional[int] , UpperCamelCase__ : Optional[float] = None ): super().__init__() A__ : Optional[int] =drop_prob def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : torch.Tensor ): return drop_path(UpperCamelCase__ , self.drop_prob , self.training ) def _UpperCAmelCase ( self : List[str] ): return "p={}".format(self.drop_prob ) class __lowerCAmelCase ( nn.Module): '''simple docstring''' def 
__init__( self : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int=None ): super().__init__() A__ : Optional[int] =patch_size if isinstance(UpperCamelCase__ , collections.abc.Iterable ) else (patch_size, patch_size) A__ : Optional[int] =stride if isinstance(UpperCamelCase__ , collections.abc.Iterable ) else (stride, stride) A__ : int =padding if isinstance(UpperCamelCase__ , collections.abc.Iterable ) else (padding, padding) A__ : Any =nn.Convad(UpperCamelCase__ , UpperCamelCase__ , kernel_size=UpperCamelCase__ , stride=UpperCamelCase__ , padding=UpperCamelCase__ ) A__ : Any =norm_layer(UpperCamelCase__ ) if norm_layer else nn.Identity() def _UpperCAmelCase ( self : Tuple , UpperCamelCase__ : str ): A__ : List[str] =self.projection(UpperCamelCase__ ) A__ : Any =self.norm(UpperCamelCase__ ) return embeddings class __lowerCAmelCase ( nn.GroupNorm): '''simple docstring''' def __init__( self : Tuple , UpperCamelCase__ : Dict , **UpperCamelCase__ : Union[str, Any] ): super().__init__(1 , UpperCamelCase__ , **UpperCamelCase__ ) class __lowerCAmelCase ( nn.Module): '''simple docstring''' def __init__( self : Tuple , UpperCamelCase__ : Optional[int] ): super().__init__() A__ : Any =nn.AvgPoolad(UpperCamelCase__ , stride=1 , padding=pool_size // 2 , count_include_pad=UpperCamelCase__ ) def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : List[str] ): return self.pool(UpperCamelCase__ ) - hidden_states class __lowerCAmelCase ( nn.Module): '''simple docstring''' def __init__( self : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] ): super().__init__() A__ : List[Any] =nn.Convad(UpperCamelCase__ , UpperCamelCase__ , 1 ) A__ : Union[str, Any] =nn.Convad(UpperCamelCase__ , UpperCamelCase__ , 1 ) A__ : Dict =PoolFormerDropPath(UpperCamelCase__ ) if 
isinstance(config.hidden_act , UpperCamelCase__ ): A__ : Tuple =ACTaFN[config.hidden_act] else: A__ : Optional[Any] =config.hidden_act def _UpperCAmelCase ( self : Tuple , UpperCamelCase__ : Dict ): A__ : Optional[Any] =self.conva(UpperCamelCase__ ) A__ : List[str] =self.act_fn(UpperCamelCase__ ) A__ : List[str] =self.drop(UpperCamelCase__ ) A__ : Optional[int] =self.conva(UpperCamelCase__ ) A__ : Optional[Any] =self.drop(UpperCamelCase__ ) return hidden_states class __lowerCAmelCase ( nn.Module): '''simple docstring''' def __init__( self : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Any ): super().__init__() A__ : Optional[int] =PoolFormerPooling(UpperCamelCase__ ) A__ : List[str] =PoolFormerOutput(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) A__ : int =PoolFormerGroupNorm(UpperCamelCase__ ) A__ : int =PoolFormerGroupNorm(UpperCamelCase__ ) # Useful for training neural nets A__ : Tuple =PoolFormerDropPath(UpperCamelCase__ ) if drop_path > 0.0 else nn.Identity() A__ : Optional[Any] =config.use_layer_scale if config.use_layer_scale: A__ : List[str] =nn.Parameter( config.layer_scale_init_value * torch.ones((UpperCamelCase__) ) , requires_grad=UpperCamelCase__ ) A__ : List[Any] =nn.Parameter( config.layer_scale_init_value * torch.ones((UpperCamelCase__) ) , requires_grad=UpperCamelCase__ ) def _UpperCAmelCase ( self : Any , UpperCamelCase__ : Optional[int] ): if self.use_layer_scale: A__ : Optional[int] =self.pooling(self.before_norm(UpperCamelCase__ ) ) A__ : Union[str, Any] =self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output # First residual connection A__ : Union[str, Any] =hidden_states + self.drop_path(UpperCamelCase__ ) A__ : Tuple =() A__ : List[str] =self.output(self.after_norm(UpperCamelCase__ ) ) A__ : Optional[Any] =self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * 
layer_output # Second residual connection A__ : str =hidden_states + self.drop_path(UpperCamelCase__ ) A__ : List[Any] =(output,) + outputs return outputs else: A__ : Tuple =self.drop_path(self.pooling(self.before_norm(UpperCamelCase__ ) ) ) # First residual connection A__ : Optional[Any] =pooling_output + hidden_states A__ : Tuple =() # Second residual connection inside the PoolFormerOutput block A__ : List[str] =self.drop_path(self.output(self.after_norm(UpperCamelCase__ ) ) ) A__ : Any =hidden_states + layer_output A__ : Tuple =(output,) + outputs return outputs class __lowerCAmelCase ( nn.Module): '''simple docstring''' def __init__( self : Dict , UpperCamelCase__ : List[str] ): super().__init__() A__ : Tuple =config # stochastic depth decay rule A__ : Dict =[x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )] # patch embeddings A__ : Tuple =[] for i in range(config.num_encoder_blocks ): embeddings.append( PoolFormerEmbeddings( patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) ) A__ : List[str] =nn.ModuleList(UpperCamelCase__ ) # Transformer blocks A__ : Union[str, Any] =[] A__ : Any =0 for i in range(config.num_encoder_blocks ): # each block consists of layers A__ : Union[str, Any] =[] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i] ): layers.append( PoolFormerLayer( UpperCamelCase__ , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) ) blocks.append(nn.ModuleList(UpperCamelCase__ ) ) A__ : str =nn.ModuleList(UpperCamelCase__ ) def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : Optional[int]=True ): A__ : Union[str, Any] =() if 
output_hidden_states else None A__ : Dict =pixel_values for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ): A__ , A__ : List[Any] =layers # Get patch embeddings from hidden_states A__ : Any =embedding_layer(UpperCamelCase__ ) # Send the embeddings through the blocks for _, blk in enumerate(UpperCamelCase__ ): A__ : List[str] =blk(UpperCamelCase__ ) A__ : Tuple =layer_outputs[0] if output_hidden_states: A__ : List[Any] =all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=UpperCamelCase__ , hidden_states=UpperCamelCase__ ) class __lowerCAmelCase ( _UpperCamelCase): '''simple docstring''' __magic_name__ : List[str] = PoolFormerConfig __magic_name__ : int = """poolformer""" __magic_name__ : Any = """pixel_values""" __magic_name__ : Any = True def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : str ): if isinstance(UpperCamelCase__ , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(UpperCamelCase__ , nn.LayerNorm ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) def _UpperCAmelCase ( self : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any]=False ): if isinstance(UpperCamelCase__ , UpperCamelCase__ ): A__ : Optional[Any] =value __A : Optional[int] = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. 
Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n" __A : Dict = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n" @add_start_docstrings( """The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.""" , _UpperCamelCase , ) class __lowerCAmelCase ( _UpperCamelCase): '''simple docstring''' def __init__( self : List[str] , UpperCamelCase__ : Dict ): super().__init__(UpperCamelCase__ ) A__ : List[Any] =config A__ : Optional[Any] =PoolFormerEncoder(UpperCamelCase__ ) # Initialize weights and apply final processing self.post_init() def _UpperCAmelCase ( self : Tuple ): return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(UpperCamelCase__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCamelCase__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def _UpperCAmelCase ( self : str , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[bool] = None , ): A__ : int =( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) A__ : Optional[int] =return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values" ) A__ : List[Any] =self.encoder( UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , return_dict=UpperCamelCase__ , ) A__ : int =encoder_outputs[0] if not return_dict: return (sequence_output, None) + encoder_outputs[1:] return BaseModelOutputWithNoAttention( last_hidden_state=UpperCamelCase__ , hidden_states=encoder_outputs.hidden_states , ) class __lowerCAmelCase ( nn.Module): '''simple docstring''' def 
__init__( self : Dict , UpperCamelCase__ : Optional[Any] ): super().__init__() A__ : List[str] =nn.Linear(config.hidden_size , config.hidden_size ) def _UpperCAmelCase ( self : Optional[Any] , UpperCamelCase__ : List[Any] ): A__ : int =self.dense(UpperCamelCase__ ) return output @add_start_docstrings( """ PoolFormer Model transformer with an image classification head on top """ , _UpperCamelCase , ) class __lowerCAmelCase ( _UpperCamelCase): '''simple docstring''' def __init__( self : Optional[Any] , UpperCamelCase__ : str ): super().__init__(UpperCamelCase__ ) A__ : List[str] =config.num_labels A__ : Optional[int] =PoolFormerModel(UpperCamelCase__ ) # Final norm A__ : Dict =PoolFormerGroupNorm(config.hidden_sizes[-1] ) # Classifier head A__ : Dict =( nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UpperCamelCase__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCamelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.LongTensor] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[bool] = None , ): A__ : Tuple =return_dict if return_dict is not None else self.config.use_return_dict A__ : List[str] =self.poolformer( UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , return_dict=UpperCamelCase__ , ) A__ : str =outputs[0] A__ : List[Any] =self.classifier(self.norm(UpperCamelCase__ ).mean([-2, -1] ) ) A__ : Optional[Any] =None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: A__ : int ="regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): A__ : Tuple ="single_label_classification" else: A__ : 
Optional[int] ="multi_label_classification" if self.config.problem_type == "regression": A__ : Dict =MSELoss() if self.num_labels == 1: A__ : Optional[Any] =loss_fct(logits.squeeze() , labels.squeeze() ) else: A__ : List[str] =loss_fct(UpperCamelCase__ , UpperCamelCase__ ) elif self.config.problem_type == "single_label_classification": A__ : Tuple =CrossEntropyLoss() A__ : int =loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": A__ : List[Any] =BCEWithLogitsLoss() A__ : str =loss_fct(UpperCamelCase__ , UpperCamelCase__ ) if not return_dict: A__ : Optional[int] =(logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=UpperCamelCase__ , logits=UpperCamelCase__ , hidden_states=outputs.hidden_states )
656
0
"""simple docstring""" def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2 def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase=0 ): '''simple docstring''' return sorted(__UpperCamelCase , key=lambda __UpperCamelCase : x[column] ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=float("""inf""" ) ): '''simple docstring''' for i in range(points_counts - 1 ): for j in range(i + 1 , __UpperCamelCase ): UpperCAmelCase__ : Optional[int] = euclidean_distance_sqr(points[i] , points[j] ) if current_dis < min_dis: UpperCAmelCase__ : Any = current_dis return min_dis def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=float("""inf""" ) ): '''simple docstring''' for i in range(min(6 , points_counts - 1 ) , __UpperCamelCase ): for j in range(max(0 , i - 6 ) , __UpperCamelCase ): UpperCAmelCase__ : List[Any] = euclidean_distance_sqr(points[i] , points[j] ) if current_dis < min_dis: UpperCAmelCase__ : List[Any] = current_dis return min_dis def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' if points_counts <= 3: return dis_between_closest_pair(__UpperCamelCase , __UpperCamelCase ) # recursion UpperCAmelCase__ : Any = points_counts // 2 UpperCAmelCase__ : Union[str, Any] = closest_pair_of_points_sqr( __UpperCamelCase , points_sorted_on_y[:mid] , __UpperCamelCase ) UpperCAmelCase__ : Optional[int] = closest_pair_of_points_sqr( __UpperCamelCase , points_sorted_on_y[mid:] , points_counts - mid ) UpperCAmelCase__ : Dict = min(__UpperCamelCase , __UpperCamelCase ) UpperCAmelCase__ : List[Any] = [] for point in points_sorted_on_x: if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis: cross_strip.append(__UpperCamelCase ) UpperCAmelCase__ : Tuple = dis_between_closest_in_strip( __UpperCamelCase , len(__UpperCamelCase ) , __UpperCamelCase ) return min(__UpperCamelCase , 
__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = column_based_sort(__UpperCamelCase , column=0 ) UpperCAmelCase__ : Optional[int] = column_based_sort(__UpperCamelCase , column=1 ) return ( closest_pair_of_points_sqr( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) ) ** 0.5 if __name__ == "__main__": __UpperCAmelCase = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)] print('Distance:', closest_pair_of_points(points, len(points)))
65
"""simple docstring""" import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase): '''simple docstring''' __magic_name__ : int = IFInpaintingSuperResolutionPipeline __magic_name__ : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""} __magic_name__ : str = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""}) __magic_name__ : Optional[Any] = PipelineTesterMixin.required_optional_params - {"""latents"""} def _UpperCAmelCase ( self : Union[str, Any] ): return self._get_superresolution_dummy_components() def _UpperCAmelCase ( self : Optional[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int]=0 ): if str(UpperCamelCase__ ).startswith("mps" ): A__ : Any =torch.manual_seed(UpperCamelCase__ ) else: A__ : Dict =torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ ) A__ : Tuple =floats_tensor((1, 3, 16, 16) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) A__ : Optional[int] =floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) A__ : Any =floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) A__ : List[str] ={ "prompt": "A painting of a squirrel eating a burger", "image": image, "original_image": original_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , 
reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def _UpperCAmelCase ( self : Dict ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def _UpperCAmelCase ( self : int ): self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" ) def _UpperCAmelCase ( self : Tuple ): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def _UpperCAmelCase ( self : str ): self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def _UpperCAmelCase ( self : Dict ): self._test_save_load_local() def _UpperCAmelCase ( self : Optional[int] ): self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
656
0
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}


class TrajectoryTransformerConfig(PretrainedConfig):
    """Configuration class for a TrajectoryTransformer model.

    Stores the vocabulary/architecture hyper-parameters and forwards the
    special-token ids to `PretrainedConfig`. Defaults reproduce the
    halfcheetah-medium-v2 checkpoint configuration.

    NOTE(review): the obfuscated original declared every __init__ parameter
    with the same name (a SyntaxError) and assigned attributes to a throwaway
    local; names restored from the visible defaults and attribute map.
    """

    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map the generic names used elsewhere in the library onto GPT-style ones.
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
66
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __A : Any = { "configuration_efficientformer": [ "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "EfficientFormerConfig", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Union[str, Any] = ["EfficientFormerImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Optional[int] = [ "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "EfficientFormerForImageClassification", "EfficientFormerForImageClassificationWithTeacher", "EfficientFormerModel", "EfficientFormerPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Optional[int] = [ "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFEfficientFormerForImageClassification", "TFEfficientFormerForImageClassificationWithTeacher", "TFEfficientFormerModel", "TFEfficientFormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientformer import EfficientFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientformer import ( EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientFormerForImageClassification, EfficientFormerForImageClassificationWithTeacher, EfficientFormerModel, EfficientFormerPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass 
else: from .modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, TFEfficientFormerPreTrainedModel, ) else: import sys __A : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
656
0
from __future__ import annotations

from typing import Any


class Node:
    """One slot of the ring; an empty slot is marked by ``data is None``."""

    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None


class CircularQueueLinkedList:
    """FIFO queue of fixed capacity backed by a circular doubly linked list.

    The previous revision was non-functional: the node class and the queue
    class shared one obfuscated name (the second definition shadowed the
    first), every method shared a single name (so only the last definition
    survived on the class), and ``Node()`` was called without ever being
    defined. This restores a working implementation with the same ring
    semantics: ``front`` points at the oldest slot, ``rear`` at the newest
    filled slot, and a slot whose ``data`` is ``None`` counts as free.
    """

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        """Allocate ``initial_capacity`` empty nodes linked into a ring."""
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        # Close the ring: last node wraps around to the front.
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        # Empty iff front and rear coincide on an unused slot.
        return self.front == self.rear and self.front is not None and self.front.data is None

    def first(self) -> Any | None:
        """Return the oldest element without removing it; raise if empty."""
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        """Store ``data`` in the next free slot; raise ``Exception`` if full."""
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            # Advance rear only when the current rear slot is already taken.
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        """Remove and return the oldest element; raise ``Exception`` if empty."""
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            # Single occupied slot: clear it in place, pointers stay put.
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception('Empty Queue')

    def check_is_full(self) -> None:
        # Full when rear has caught up to the slot just before front.
        if self.rear and self.rear.next == self.front:
            raise Exception('Full Queue')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
67
"""simple docstring""" import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def lowercase ( UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any]=10 ): """simple docstring""" A__ : Tuple =[] for _ in range(UpperCamelCase ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def lowercase ( UpperCamelCase : List[str] , UpperCamelCase : Union[str, Any]=10 ): """simple docstring""" A__ : Dict =[] for step in range(UpperCamelCase ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: A__ : List[Any] =os.path.join(UpperCamelCase , "schedule.bin" ) torch.save(scheduler.state_dict() , UpperCamelCase ) A__ : Dict =torch.load(UpperCamelCase ) scheduler.load_state_dict(UpperCamelCase ) return lrs @require_torch class __lowerCAmelCase ( unittest.TestCase): '''simple docstring''' def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int ): self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) ) for a, b in zip(UpperCamelCase__ , UpperCamelCase__ ): self.assertAlmostEqual(UpperCamelCase__ , UpperCamelCase__ , delta=UpperCamelCase__ ) def _UpperCAmelCase ( self : Tuple ): A__ : Any =torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase__ ) A__ : Optional[Any] =torch.tensor([0.4, 0.2, -0.5] ) A__ : Any =nn.MSELoss() # No warmup, constant schedule, no gradient clipping A__ : List[str] =AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 ) for _ in range(100 ): A__ : 
Optional[int] =criterion(UpperCamelCase__ , UpperCamelCase__ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) def _UpperCAmelCase ( self : Dict ): A__ : Optional[int] =torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase__ ) A__ : Dict =torch.tensor([0.4, 0.2, -0.5] ) A__ : Optional[int] =nn.MSELoss() # No warmup, constant schedule, no gradient clipping A__ : int =Adafactor( params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=UpperCamelCase__ , weight_decay=0.0 , relative_step=UpperCamelCase__ , scale_parameter=UpperCamelCase__ , warmup_init=UpperCamelCase__ , ) for _ in range(1000 ): A__ : List[Any] =criterion(UpperCamelCase__ , UpperCamelCase__ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) @require_torch class __lowerCAmelCase ( unittest.TestCase): '''simple docstring''' __magic_name__ : Optional[int] = nn.Linear(50 , 50) if is_torch_available() else None __magic_name__ : Any = AdamW(m.parameters() , lr=10.0) if is_torch_available() else None __magic_name__ : Union[str, Any] = 10 def _UpperCAmelCase ( self : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int=None ): self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) ) for a, b in zip(UpperCamelCase__ , UpperCamelCase__ ): self.assertAlmostEqual(UpperCamelCase__ , UpperCamelCase__ , delta=UpperCamelCase__ , msg=UpperCamelCase__ ) def _UpperCAmelCase ( self : Optional[Any] ): A__ : Union[str, Any] ={"num_warmup_steps": 2, "num_training_steps": 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) A__ : Union[str, Any] ={ 
get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {"num_warmup_steps": 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, "num_cycles": 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, "power": 2.0, "lr_end": 1E-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {"num_warmup_steps": 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): A__ , A__ : Any =data A__ : Union[str, Any] =scheduler_func(self.optimizer , **UpperCamelCase__ ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) A__ : int =unwrap_schedule(UpperCamelCase__ , self.num_steps ) self.assertListAlmostEqual( UpperCamelCase__ , UpperCamelCase__ , tol=1E-2 , msg=F'''failed for {scheduler_func} in normal scheduler''' , ) A__ : List[str] =scheduler_func(self.optimizer , **UpperCamelCase__ ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(UpperCamelCase__ ) # wrap to test picklability of the schedule A__ : Tuple =unwrap_and_save_reload_schedule(UpperCamelCase__ , self.num_steps ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ , msg=F'''failed for {scheduler_func} in save and reload''' ) class __lowerCAmelCase : '''simple docstring''' def __init__( self : int , UpperCamelCase__ : str ): A__ : int =fn def __call__( self : List[Any] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : List[Any] ): return self.fn(*UpperCamelCase__ , **UpperCamelCase__ ) @classmethod def _UpperCAmelCase ( self : Dict , 
UpperCamelCase__ : Dict ): A__ : str =list(map(self , scheduler.lr_lambdas ) )
656
0
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """Return the fixed monthly payment (EMI) for a loan.

    Uses the standard amortization formula
    ``p * r * (1 + r)**n / ((1 + r)**n - 1)`` where ``r`` is the monthly
    rate and ``n`` the number of monthly payments.

    The previous revision was a SyntaxError (all three parameters shared
    one name) and its integer check tested the value against itself;
    it also crashed with ZeroDivisionError for a 0% rate even though the
    validation explicitly allows rate >= 0.

    :param principal: amount borrowed, must be > 0.
    :param rate_per_annum: yearly interest rate as a fraction, must be >= 0.
    :param years_to_repay: repayment horizon in whole years, must be > 0.
    :raises Exception: on any violated precondition.
    """
    if principal <= 0:
        raise Exception("""Principal borrowed must be > 0""")
    if rate_per_annum < 0:
        raise Exception("""Rate of interest must be >= 0""")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("""Years to repay must be an integer > 0""")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    # Interest-free loan: the formula's denominator is 0, but the answer
    # is simply the principal split evenly across the payments.
    if rate_per_month == 0:
        return principal / number_of_payments

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
68
"""simple docstring""" import argparse import torch from transformers import ( SpeechTaConfig, SpeechTaFeatureExtractor, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaProcessor, SpeechTaTokenizer, logging, ) from transformers.tokenization_utils import AddedToken logging.set_verbosity_info() __A : List[Any] = logging.get_logger("transformers.models.speecht5") __A : Optional[Any] = { "speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm", "speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection", "speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv", "speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed", } __A : Optional[int] = { "text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens", "text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha", } __A : List[str] = { "speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0", "speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1", "speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer", "speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha", "speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer", } __A : List[Any] = { "speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out", "speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out", "speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv", "speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm", "speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv", "speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm", 
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv", "speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm", "speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv", "speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm", "speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv", "speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm", } __A : Union[str, Any] = { "text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens", } __A : Any = { "text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head", } __A : Union[str, Any] = { "encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj", "encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj", "encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj", "encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj", "encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm", "encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense", "encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense", "encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm", "encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm", "encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k", } __A : Optional[int] = { "decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj", "decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj", "decoder.layers.*.self_attn.q_proj": 
"speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj", "decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj", "decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm", "decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj", "decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj", "decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj", "decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj", "decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm", "decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense", "decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense", "decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm", } __A : Union[str, Any] = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_TEXT_DECODER_PRENET, **MAPPING_TEXT_DECODER_POSTNET, } __A : Optional[Any] = { **MAPPING_TEXT_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } __A : Optional[int] = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } __A : int = [] __A : int = [ "encoder.version", "encoder.layers.*.norm_k.weight", "encoder.layers.*.norm_k.bias", "decoder.version", "decoder.layers.*.norm_k.weight", "decoder.layers.*.norm_k.bias", "decoder.pos_emb.pe_k", "speech_encoder_prenet.embed_positions._float_tensor", "text_decoder_prenet.embed_positions._float_tensor", ] __A : Optional[Any] = IGNORE_KEYS + [ "encoder.proj", 
"text_encoder_prenet.*", "speech_decoder_prenet.*", "speech_decoder_postnet.*", ] __A : Tuple = IGNORE_KEYS + [ "encoder.proj", "speech_encoder_prenet.*", "text_decoder_prenet.*", "text_decoder_postnet.*", ] __A : Union[str, Any] = IGNORE_KEYS + [ "encoder.proj", "text_encoder_prenet.*", "text_decoder_prenet.*", "text_decoder_postnet.*", ] def lowercase ( UpperCamelCase : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : List[Any] , UpperCamelCase : int ): """simple docstring""" for attribute in key.split("." ): A__ : Dict =getattr(UpperCamelCase , UpperCamelCase ) if weight_type is not None: A__ : Union[str, Any] =getattr(UpperCamelCase , UpperCamelCase ).shape else: A__ : Tuple =hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": A__ : Any =value elif weight_type == "weight_g": A__ : Any =value elif weight_type == "weight_v": A__ : Any =value elif weight_type == "bias": A__ : Tuple =value elif weight_type == "running_mean": A__ : Dict =value elif weight_type == "running_var": A__ : List[str] =value elif weight_type == "num_batches_tracked": A__ : Dict =value else: A__ : Optional[int] =value logger.info(F'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''' ) def lowercase ( UpperCamelCase : Tuple , UpperCamelCase : Tuple ): """simple docstring""" for key in ignore_keys: if key.endswith(".*" ): if name.startswith(key[:-1] ): return True elif ".*." in key: A__ , A__ : List[str] =key.split(".*." 
) if prefix in name and suffix in name: return True elif key in name: return True return False def lowercase ( UpperCamelCase : Dict , UpperCamelCase : Optional[int] , UpperCamelCase : Dict ): """simple docstring""" A__ : Tuple =[] if task == "s2t": A__ : Dict =hf_model.speechta.encoder.prenet.feature_encoder A__ : int =MAPPING_S2T A__ : List[Any] =IGNORE_KEYS_S2T elif task == "t2s": A__ : Union[str, Any] =None A__ : List[Any] =MAPPING_T2S A__ : Tuple =IGNORE_KEYS_T2S elif task == "s2s": A__ : Optional[Any] =hf_model.speechta.encoder.prenet.feature_encoder A__ : Tuple =MAPPING_S2S A__ : Any =IGNORE_KEYS_S2S else: raise ValueError(F'''Unsupported task: {task}''' ) for name, value in fairseq_dict.items(): if should_ignore(UpperCamelCase , UpperCamelCase ): logger.info(F'''{name} was ignored''' ) continue A__ : Optional[Any] =False if "conv_layers" in name: load_conv_layer( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , hf_model.config.feat_extract_norm == "group" , ) A__ : List[Any] =True else: for key, mapped_key in MAPPING.items(): # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if "*" in key: A__ , A__ : Dict =key.split(".*." ) if prefix in name and suffix in name: A__ : int =suffix # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: if key in name: A__ : List[Any] =True if "*" in mapped_key: A__ : Optional[int] =name.split(UpperCamelCase )[0].split("." 
)[-2] A__ : int =mapped_key.replace("*" , UpperCamelCase ) if "weight_g" in name: A__ : str ="weight_g" elif "weight_v" in name: A__ : Optional[Any] ="weight_v" elif "bias" in name: A__ : Any ="bias" elif "weight" in name: A__ : Optional[int] ="weight" elif "running_mean" in name: A__ : Tuple ="running_mean" elif "running_var" in name: A__ : Optional[int] ="running_var" elif "num_batches_tracked" in name: A__ : str ="num_batches_tracked" else: A__ : List[Any] =None set_recursively(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) continue if not is_used: unused_weights.append(UpperCamelCase ) logger.warning(F'''Unused weights: {unused_weights}''' ) def lowercase ( UpperCamelCase : Dict , UpperCamelCase : Dict , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : Dict ): """simple docstring""" A__ : Any =full_name.split("conv_layers." )[-1] A__ : Dict =name.split("." ) A__ : int =int(items[0] ) A__ : str =int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) A__ : Optional[Any] =value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) A__ : Optional[int] =value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, 
but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) A__ : Any =value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) A__ : Any =value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(UpperCamelCase ) @torch.no_grad() def lowercase ( UpperCamelCase : Any , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[str] , UpperCamelCase : str=None , UpperCamelCase : Any=None , UpperCamelCase : Tuple=None , ): """simple docstring""" if config_path is not None: A__ : Any =SpeechTaConfig.from_pretrained(UpperCamelCase ) else: A__ : Any =SpeechTaConfig() if task == "s2t": A__ : Union[str, Any] =config.max_text_positions A__ : Dict =SpeechTaForSpeechToText(UpperCamelCase ) elif task == "t2s": A__ : str =1876 A__ : Optional[int] =600 A__ : Tuple =config.max_speech_positions A__ : Optional[Any] =SpeechTaForTextToSpeech(UpperCamelCase ) elif task == "s2s": A__ : str =1876 A__ : Tuple =config.max_speech_positions A__ : Any =SpeechTaForSpeechToSpeech(UpperCamelCase ) else: raise ValueError(F'''Unknown task name: {task}''' ) if vocab_path: A__ : str =SpeechTaTokenizer(UpperCamelCase , model_max_length=config.max_text_positions ) # Mask token behaves like a normal word, i.e. 
include the space before it A__ : Optional[Any] =AddedToken("<mask>" , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) A__ : int =mask_token tokenizer.add_special_tokens({"mask_token": mask_token} ) tokenizer.add_tokens(["<ctc_blank>"] ) A__ : Dict =SpeechTaFeatureExtractor() A__ : Tuple =SpeechTaProcessor(tokenizer=UpperCamelCase , feature_extractor=UpperCamelCase ) processor.save_pretrained(UpperCamelCase ) A__ : Union[str, Any] =torch.load(UpperCamelCase ) recursively_load_weights(fairseq_checkpoint["model"] , UpperCamelCase , UpperCamelCase ) model.save_pretrained(UpperCamelCase ) if repo_id: print("Pushing to the hub..." ) processor.push_to_hub(UpperCamelCase ) model.push_to_hub(UpperCamelCase ) if __name__ == "__main__": __A : Dict = argparse.ArgumentParser() parser.add_argument( "--task", default="s2t", type=str, help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.", ) parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model." ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) __A : str = parser.parse_args() convert_speechta_checkpoint( args.task, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.vocab_path, args.push_to_hub, )
656
0
"""Random graph generation as adjacency dicts ``{node: [neighbours]}``."""
import random


def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random (di)graph on ``vertices_number`` nodes.

    Each unordered pair gets an edge with the given ``probability``
    (undirected graphs get the symmetric edge too). ``probability >= 1``
    short-circuits to the complete graph, ``probability <= 0`` to an
    edgeless graph.

    The previous revision gave both functions in this module the same
    obfuscated name, so the call to ``complete_graph`` hit an undefined
    name and the second definition silently replaced the first.
    """
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # For each pair of nodes, add an edge from i to j when the uniform
    # draw falls below `probability` (i.e. with chance `probability`).
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge in from j to i, either
                    graph[j].append(i)

    return graph


def complete_graph(vertices_number: int) -> dict:
    """Return the complete graph on ``vertices_number`` nodes."""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }


if __name__ == "__main__":
    import doctest

    doctest.testmod()
69
"""simple docstring""" from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase): '''simple docstring''' __magic_name__ : List[Any] = [R"""h\.\d+\.attn\.bias""", R"""h\.\d+\.attn\.masked_bias"""] @register_to_config def __init__( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : int = 50257 , UpperCamelCase__ : int = 1024 , UpperCamelCase__ : int = 768 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : str = "gelu_new" , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : float = 1E-5 , UpperCamelCase__ : float = 0.02 , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , ): super().__init__() A__ : Dict =prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( F'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and''' F''' `n_embd`: {n_embd} are not equal.''' ) A__ : Optional[int] =prefix_inner_dim A__ : Optional[int] =prefix_hidden_dim A__ : Optional[int] =( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim ) if self.prefix_hidden_dim is not None else nn.Identity() ) A__ : Optional[int] =( nn.Linear(self.prefix_hidden_dim , UpperCamelCase__ ) if self.prefix_hidden_dim is not None else nn.Identity() ) A__ : str =GPTaConfig( vocab_size=UpperCamelCase__ , n_positions=UpperCamelCase__ , n_embd=UpperCamelCase__ , n_layer=UpperCamelCase__ , n_head=UpperCamelCase__ , n_inner=UpperCamelCase__ 
, activation_function=UpperCamelCase__ , resid_pdrop=UpperCamelCase__ , embd_pdrop=UpperCamelCase__ , attn_pdrop=UpperCamelCase__ , layer_norm_epsilon=UpperCamelCase__ , initializer_range=UpperCamelCase__ , scale_attn_weights=UpperCamelCase__ , use_cache=UpperCamelCase__ , scale_attn_by_inverse_layer_idx=UpperCamelCase__ , reorder_and_upcast_attn=UpperCamelCase__ , ) A__ : Any =GPTaLMHeadModel(UpperCamelCase__ ) def _UpperCAmelCase ( self : Any , UpperCamelCase__ : torch.Tensor , UpperCamelCase__ : torch.Tensor , UpperCamelCase__ : Optional[torch.Tensor] = None , UpperCamelCase__ : Optional[torch.Tensor] = None , ): A__ : int =self.transformer.transformer.wte(UpperCamelCase__ ) A__ : Tuple =self.encode_prefix(UpperCamelCase__ ) A__ : Union[str, Any] =self.decode_prefix(UpperCamelCase__ ) A__ : Tuple =torch.cat((prefix_embeds, embedding_text) , dim=1 ) if labels is not None: A__ : Any =self.get_dummy_token(input_ids.shape[0] , input_ids.device ) A__ : List[Any] =torch.cat((dummy_token, input_ids) , dim=1 ) A__ : Any =self.transformer(inputs_embeds=UpperCamelCase__ , labels=UpperCamelCase__ , attention_mask=UpperCamelCase__ ) if self.prefix_hidden_dim is not None: return out, hidden else: return out def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : torch.device ): return torch.zeros(UpperCamelCase__ , self.prefix_length , dtype=torch.intaa , device=UpperCamelCase__ ) def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : Tuple ): return self.encode_prefix(UpperCamelCase__ ) @torch.no_grad() def _UpperCAmelCase ( self : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : str ): A__ : Optional[int] =torch.split(UpperCamelCase__ , 1 , dim=0 ) A__ : List[str] =[] A__ : Dict =[] for feature in features: A__ : Any =self.decode_prefix(feature.to(UpperCamelCase__ ) ) # back to the clip feature # Only support beam search for now A__ , A__ : Optional[Any] =self.generate_beam( 
input_embeds=UpperCamelCase__ , device=UpperCamelCase__ , eos_token_id=UpperCamelCase__ ) generated_tokens.append(output_tokens[0] ) generated_seq_lengths.append(seq_lengths[0] ) A__ : Optional[Any] =torch.stack(UpperCamelCase__ ) A__ : Optional[int] =torch.stack(UpperCamelCase__ ) return generated_tokens, generated_seq_lengths @torch.no_grad() def _UpperCAmelCase ( self : List[Any] , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : int = 5 , UpperCamelCase__ : int = 67 , UpperCamelCase__ : float = 1.0 , UpperCamelCase__ : Optional[int] = None , ): A__ : str =eos_token_id A__ : Optional[Any] =None A__ : int =None A__ : Union[str, Any] =torch.ones(UpperCamelCase__ , device=UpperCamelCase__ , dtype=torch.int ) A__ : Any =torch.zeros(UpperCamelCase__ , device=UpperCamelCase__ , dtype=torch.bool ) if input_embeds is not None: A__ : Union[str, Any] =input_embeds else: A__ : Optional[Any] =self.transformer.transformer.wte(UpperCamelCase__ ) for i in range(UpperCamelCase__ ): A__ : Optional[int] =self.transformer(inputs_embeds=UpperCamelCase__ ) A__ : Tuple =outputs.logits A__ : Union[str, Any] =logits[:, -1, :] / (temperature if temperature > 0 else 1.0) A__ : Optional[Any] =logits.softmax(-1 ).log() if scores is None: A__ , A__ : Union[str, Any] =logits.topk(UpperCamelCase__ , -1 ) A__ : Union[str, Any] =generated.expand(UpperCamelCase__ , *generated.shape[1:] ) A__ , A__ : Optional[int] =next_tokens.permute(1 , 0 ), scores.squeeze(0 ) if tokens is None: A__ : str =next_tokens else: A__ : Optional[Any] =tokens.expand(UpperCamelCase__ , *tokens.shape[1:] ) A__ : str =torch.cat((tokens, next_tokens) , dim=1 ) else: A__ : Union[str, Any] =-float(np.inf ) A__ : Dict =0 A__ : Optional[Any] =scores[:, None] + logits seq_lengths[~is_stopped] += 1 A__ : Optional[Any] =scores_sum / seq_lengths[:, None] A__ , A__ : List[Any] =scores_sum_average.view(-1 ).topk(UpperCamelCase__ , -1 ) A__ : Tuple 
=next_tokens // scores_sum.shape[1] A__ : List[Any] =seq_lengths[next_tokens_source] A__ : int =next_tokens % scores_sum.shape[1] A__ : str =next_tokens.unsqueeze(1 ) A__ : List[Any] =tokens[next_tokens_source] A__ : int =torch.cat((tokens, next_tokens) , dim=1 ) A__ : List[str] =generated[next_tokens_source] A__ : Optional[Any] =scores_sum_average * seq_lengths A__ : Optional[int] =is_stopped[next_tokens_source] A__ : List[str] =self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 ) A__ : str =torch.cat((generated, next_token_embed) , dim=1 ) A__ : str =is_stopped + next_tokens.eq(UpperCamelCase__ ).squeeze() if is_stopped.all(): break A__ : Optional[int] =scores / seq_lengths A__ : List[Any] =scores.argsort(descending=UpperCamelCase__ ) # tokens tensors are already padded to max_seq_length A__ : int =[tokens[i] for i in order] A__ : Any =torch.stack(UpperCamelCase__ , dim=0 ) A__ : int =torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype ) return output_texts, seq_lengths
656
0
from __future__ import annotations import unittest from transformers import LEDConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFLEDForConditionalGeneration, TFLEDModel @require_tf class A: '''simple docstring''' UpperCamelCase = LEDConfig UpperCamelCase = {} UpperCamelCase = '''gelu''' def __init__( self : List[Any] , A_ : Union[str, Any] , A_ : Optional[Any]=13 , A_ : List[Any]=7 , A_ : str=True , A_ : Optional[Any]=False , A_ : Any=99 , A_ : str=32 , A_ : str=2 , A_ : Optional[Any]=4 , A_ : Tuple=37 , A_ : Dict=0.1 , A_ : str=0.1 , A_ : Optional[Any]=20 , A_ : Optional[Any]=2 , A_ : int=1 , A_ : List[str]=0 , A_ : List[str]=4 , ) -> Tuple: """simple docstring""" lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = seq_length lowerCamelCase_ = is_training lowerCamelCase_ = use_labels lowerCamelCase_ = vocab_size lowerCamelCase_ = hidden_size lowerCamelCase_ = num_hidden_layers lowerCamelCase_ = num_attention_heads lowerCamelCase_ = intermediate_size lowerCamelCase_ = hidden_dropout_prob lowerCamelCase_ = attention_probs_dropout_prob lowerCamelCase_ = max_position_embeddings lowerCamelCase_ = eos_token_id lowerCamelCase_ = pad_token_id lowerCamelCase_ = bos_token_id lowerCamelCase_ = attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window` and one before and one after lowerCamelCase_ = self.attention_window + 2 # because of padding `encoder_seq_length`, is 
different from `seq_length`. Relevant for # the `test_attention_outputs` and `test_hidden_states_output` tests lowerCamelCase_ = ( self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window ) def a__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) lowerCamelCase_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) lowerCamelCase_ = tf.concat([input_ids, eos_tensor] , axis=1 ) lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase_ = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , ) lowerCamelCase_ = prepare_led_inputs_dict(A_ , A_ , A_ ) lowerCamelCase_ = tf.concat( [tf.zeros_like(A_ )[:, :-1], tf.ones_like(A_ )[:, -1:]] , axis=-1 , ) lowerCamelCase_ = global_attention_mask return config, inputs_dict def a__ ( self : Optional[Any] , A_ : Optional[int] , A_ : Tuple ) -> str: """simple docstring""" lowerCamelCase_ = TFLEDModel(config=A_ ).get_decoder() lowerCamelCase_ = inputs_dict['input_ids'] lowerCamelCase_ = input_ids[:1, :] lowerCamelCase_ = inputs_dict['attention_mask'][:1, :] lowerCamelCase_ = 1 # first forward pass lowerCamelCase_ = model(A_ , attention_mask=A_ , use_cache=A_ ) lowerCamelCase_ , lowerCamelCase_ = outputs.to_tuple() # 
create hypothetical next token and extent to next_input_ids lowerCamelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowerCamelCase_ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and lowerCamelCase_ = tf.concat([input_ids, next_tokens] , axis=-1 ) lowerCamelCase_ = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) lowerCamelCase_ = model(A_ , attention_mask=A_ )[0] lowerCamelCase_ = model(A_ , attention_mask=A_ , past_key_values=A_ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice lowerCamelCase_ = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) lowerCamelCase_ = output_from_no_past[:, -3:, random_slice_idx] lowerCamelCase_ = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(A_ , A_ , rtol=1E-3 ) def _SCREAMING_SNAKE_CASE ( lowercase : List[str] , lowercase : int , lowercase : Union[str, Any] , lowercase : int=None , lowercase : str=None , lowercase : Tuple=None , lowercase : str=None , ): '''simple docstring''' if attention_mask is None: lowerCamelCase_ = tf.cast(tf.math.not_equal(lowercase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: lowerCamelCase_ = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: lowerCamelCase_ = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: lowerCamelCase_ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf class A( UpperCamelCase , UpperCamelCase , unittest.TestCase ): '''simple docstring''' 
UpperCamelCase = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else () UpperCamelCase = (TFLEDForConditionalGeneration,) if is_tf_available() else () UpperCamelCase = ( { '''conversational''': TFLEDForConditionalGeneration, '''feature-extraction''': TFLEDModel, '''summarization''': TFLEDForConditionalGeneration, '''text2text-generation''': TFLEDForConditionalGeneration, '''translation''': TFLEDForConditionalGeneration, } if is_tf_available() else {} ) UpperCamelCase = True UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False def a__ ( self : Any ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ = TFLEDModelTester(self ) lowerCamelCase_ = ConfigTester(self , config_class=A_ ) def a__ ( self : Dict ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() def a__ ( self : Any ) -> str: """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*A_ ) def a__ ( self : Optional[int] ) -> int: """simple docstring""" lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ = tf.zeros_like(inputs_dict['attention_mask'] ) lowerCamelCase_ = 2 lowerCamelCase_ = tf.where( tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['global_attention_mask'] , ) lowerCamelCase_ = True lowerCamelCase_ = self.model_tester.seq_length lowerCamelCase_ = self.model_tester.encoder_seq_length def check_decoder_attentions_output(A_ : Optional[Any] ): lowerCamelCase_ = outputs.decoder_attentions self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) def check_encoder_attentions_output(A_ : Dict ): lowerCamelCase_ = [t.numpy() for t in outputs.encoder_attentions] lowerCamelCase_ = [t.numpy() for t in 
outputs.encoder_global_attentions] self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers ) self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) self.assertListEqual( list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , ) for model_class in self.all_model_classes: lowerCamelCase_ = True lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = model_class(A_ ) lowerCamelCase_ = model(self._prepare_for_class(A_ , A_ ) ) lowerCamelCase_ = len(A_ ) self.assertEqual(config.output_hidden_states , A_ ) check_encoder_attentions_output(A_ ) if self.is_encoder_decoder: lowerCamelCase_ = model_class(A_ ) lowerCamelCase_ = model(self._prepare_for_class(A_ , A_ ) ) self.assertEqual(config.output_hidden_states , A_ ) check_decoder_attentions_output(A_ ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] lowerCamelCase_ = True lowerCamelCase_ = model_class(A_ ) lowerCamelCase_ = model(self._prepare_for_class(A_ , A_ ) ) self.assertEqual(config.output_hidden_states , A_ ) check_encoder_attentions_output(A_ ) # Check attention is always last and order is fine lowerCamelCase_ = True lowerCamelCase_ = True lowerCamelCase_ = model_class(A_ ) lowerCamelCase_ = model(self._prepare_for_class(A_ , A_ ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(A_ ) ) self.assertEqual(model.config.output_hidden_states , A_ ) check_encoder_attentions_output(A_ ) @unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.' 
) def a__ ( self : Dict ) -> List[Any]: """simple docstring""" pass def a__ ( self : str ) -> Union[str, Any]: """simple docstring""" pass def _SCREAMING_SNAKE_CASE ( lowercase : List[Any] ): '''simple docstring''' return tf.constant(lowercase , dtype=tf.intaa ) lowerCamelCase : Tuple = 1e-4 @slow @require_tf class A( unittest.TestCase ): '''simple docstring''' def a__ ( self : str ) -> Any: """simple docstring""" lowerCamelCase_ = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' ).led # change to intended input here lowerCamelCase_ = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] ) lowerCamelCase_ = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] ) lowerCamelCase_ = prepare_led_inputs_dict(model.config , A_ , A_ ) lowerCamelCase_ = model(**A_ )[0] lowerCamelCase_ = (1, 1024, 768) self.assertEqual(output.shape , A_ ) # change to expected output here lowerCamelCase_ = tf.convert_to_tensor( [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , ) tf.debugging.assert_near(output[:, :3, :3] , A_ , atol=1E-3 ) def a__ ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" lowerCamelCase_ = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' ) # change to intended input here lowerCamelCase_ = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] ) lowerCamelCase_ = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] ) lowerCamelCase_ = prepare_led_inputs_dict(model.config , A_ , A_ ) lowerCamelCase_ = model(**A_ )[0] lowerCamelCase_ = (1, 1024, model.config.vocab_size) self.assertEqual(output.shape , A_ ) # change to expected output here lowerCamelCase_ = tf.convert_to_tensor( [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , ) tf.debugging.assert_near(output[:, :3, :3] , A_ , atol=1E-3 , rtol=1E-3 )
70
"""simple docstring""" import os def lowercase ( ): """simple docstring""" A__ : List[Any] =os.path.dirname(os.path.realpath(UpperCamelCase ) ) A__ : str =os.path.join(UpperCamelCase , "triangle.txt" ) with open(UpperCamelCase ) as f: A__ : Optional[int] =f.readlines() A__ : str =[] for line in triangle: A__ : Union[str, Any] =[] for number in line.strip().split(" " ): numbers_from_line.append(int(UpperCamelCase ) ) a.append(UpperCamelCase ) for i in range(1 , len(UpperCamelCase ) ): for j in range(len(a[i] ) ): A__ : Union[str, Any] =a[i - 1][j] if j != len(a[i - 1] ) else 0 A__ : Union[str, Any] =a[i - 1][j - 1] if j > 0 else 0 a[i][j] += max(UpperCamelCase , UpperCamelCase ) return max(a[-1] ) if __name__ == "__main__": print(solution())
656
0
'''simple docstring'''


def greatest_common_divisor(a: int, b: int) -> int:
    """Return gcd(a, b) via the recursive Euclidean algorithm.

    The obfuscated original gave both parameters the same name (a
    SyntaxError) and named all functions identically; the names restored
    here are the ones the module's own ``main`` already referenced.
    """
    # gcd(0, b) == |b|; otherwise recurse on (b mod a, a).
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    """Return gcd(x, y) via the iterative Euclidean algorithm."""
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main() -> None:
    """Read two comma-separated integers from stdin and print both GCDs."""
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_a = int(nums[0])
        num_b = int(nums[1])
        # The original interpolated the first number twice; show both.
        print(
            f"greatest_common_divisor({num_a}, {num_b}) = "
            f"{greatest_common_divisor(num_a, num_b)}"
        )
        print(f"By iterative gcd({num_a}, {num_b}) = {gcd_by_iterative(num_a, num_b)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")


if __name__ == "__main__":
    main()
71
"""simple docstring""" import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() __A : int = logging.get_logger(__name__) def lowercase ( UpperCamelCase : Any ): """simple docstring""" A__ : str =OrderedDict() for key, value in state_dict.items(): if key.startswith("module.encoder" ): A__ : Dict =key.replace("module.encoder" , "glpn.encoder" ) if key.startswith("module.decoder" ): A__ : Optional[int] =key.replace("module.decoder" , "decoder.stages" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 A__ : Tuple =key[key.find("patch_embed" ) + len("patch_embed" )] A__ : Optional[Any] =key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(UpperCamelCase )-1}''' ) if "norm" in key: A__ : Dict =key.replace("norm" , "layer_norm" ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 A__ : Any =key[key.find("glpn.encoder.layer_norm" ) + len("glpn.encoder.layer_norm" )] A__ : Tuple =key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(UpperCamelCase )-1}''' ) if "layer_norm1" in key: A__ : List[Any] =key.replace("layer_norm1" , "layer_norm_1" ) if "layer_norm2" in key: A__ : Optional[int] =key.replace("layer_norm2" , "layer_norm_2" ) if "block" in key: # replace for example block1 by block.0 A__ : int =key[key.find("block" ) + len("block" )] A__ : Optional[Any] =key.replace(F'''block{idx}''' , F'''block.{int(UpperCamelCase )-1}''' ) if "attn.q" in key: A__ : Optional[Any] =key.replace("attn.q" , "attention.self.query" ) if "attn.proj" in key: A__ : Union[str, Any] =key.replace("attn.proj" , "attention.output.dense" ) if "attn" in key: A__ : str =key.replace("attn" , "attention.self" ) if "fc1" in key: A__ : Dict =key.replace("fc1" , "dense1" ) if "fc2" in key: A__ : str 
=key.replace("fc2" , "dense2" ) if "linear_pred" in key: A__ : List[Any] =key.replace("linear_pred" , "classifier" ) if "linear_fuse" in key: A__ : List[str] =key.replace("linear_fuse.conv" , "linear_fuse" ) A__ : Any =key.replace("linear_fuse.bn" , "batch_norm" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 A__ : str =key[key.find("linear_c" ) + len("linear_c" )] A__ : Dict =key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(UpperCamelCase )-1}''' ) if "bot_conv" in key: A__ : Union[str, Any] =key.replace("bot_conv" , "0.convolution" ) if "skip_conv1" in key: A__ : List[Any] =key.replace("skip_conv1" , "1.convolution" ) if "skip_conv2" in key: A__ : int =key.replace("skip_conv2" , "2.convolution" ) if "fusion1" in key: A__ : Optional[Any] =key.replace("fusion1" , "1.fusion" ) if "fusion2" in key: A__ : Optional[Any] =key.replace("fusion2" , "2.fusion" ) if "fusion3" in key: A__ : int =key.replace("fusion3" , "3.fusion" ) if "fusion" in key and "conv" in key: A__ : List[str] =key.replace("conv" , "convolutional_layer" ) if key.startswith("module.last_layer_depth" ): A__ : Tuple =key.replace("module.last_layer_depth" , "head.head" ) A__ : int =value return new_state_dict def lowercase ( UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict ): """simple docstring""" # for each of the encoder blocks: for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) A__ : int =state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' ) A__ : str =state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' ) # next, add keys and values (in that order) to the state dict A__ : List[str] =kv_weight[ : config.hidden_sizes[i], : ] A__ : Dict =kv_bias[: config.hidden_sizes[i]] A__ : Any =kv_weight[ config.hidden_sizes[i] :, : ] A__ : Any =kv_bias[config.hidden_sizes[i] :] def lowercase ( ): """simple 
docstring""" A__ : Optional[Any] ="http://images.cocodataset.org/val2017/000000039769.jpg" A__ : List[Any] =Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw ) return image @torch.no_grad() def lowercase ( UpperCamelCase : str , UpperCamelCase : Tuple , UpperCamelCase : List[str]=False , UpperCamelCase : str=None ): """simple docstring""" A__ : List[str] =GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] ) # load image processor (only resize + rescale) A__ : str =GLPNImageProcessor() # prepare image A__ : Any =prepare_img() A__ : Optional[int] =image_processor(images=UpperCamelCase , return_tensors="pt" ).pixel_values logger.info("Converting model..." ) # load original state dict A__ : int =torch.load(UpperCamelCase , map_location=torch.device("cpu" ) ) # rename keys A__ : Union[str, Any] =rename_keys(UpperCamelCase ) # key and value matrices need special treatment read_in_k_v(UpperCamelCase , UpperCamelCase ) # create HuggingFace model and load state dict A__ : Optional[int] =GLPNForDepthEstimation(UpperCamelCase ) model.load_state_dict(UpperCamelCase ) model.eval() # forward pass A__ : int =model(UpperCamelCase ) A__ : Optional[Any] =outputs.predicted_depth # verify output if model_name is not None: if "nyu" in model_name: A__ : List[Any] =torch.tensor( [[4.41_47, 4.08_73, 4.06_73], [3.78_90, 3.28_81, 3.15_25], [3.76_74, 3.54_23, 3.49_13]] ) elif "kitti" in model_name: A__ : Tuple =torch.tensor( [[3.42_91, 2.78_65, 2.51_51], [3.28_41, 2.70_21, 2.35_02], [3.11_47, 2.46_25, 2.24_81]] ) else: raise ValueError(F'''Unknown model name: {model_name}''' ) A__ : str =torch.Size([1, 480, 640] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3] , UpperCamelCase , atol=1E-4 ) print("Looks ok!" ) # finally, push to hub if required if push_to_hub: logger.info("Pushing model and image processor to the hub..." 
) model.push_to_hub( repo_path_or_name=Path(UpperCamelCase , UpperCamelCase ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=UpperCamelCase , ) image_processor.push_to_hub( repo_path_or_name=Path(UpperCamelCase , UpperCamelCase ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=UpperCamelCase , ) if __name__ == "__main__": __A : List[str] = argparse.ArgumentParser() parser.add_argument( "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub." ) parser.add_argument( "--model_name", default="glpn-kitti", type=str, help="Name of the model in case you're pushing to the hub.", ) __A : Any = parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
656
0
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# NOTE(review): in the mangled original the logger and this map were bound to
# the same name (the logger was immediately shadowed); distinct conventional
# names are restored. URLs are unchanged.
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
    "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
    "xlm-roberta-large-finetuned-conll02-dutch": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll02-spanish": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-english": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-german": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
    ),
}


class XLMRobertaConfig(PretrainedConfig):
    """Configuration class holding the hyper-parameters of an XLM-RoBERTa model.

    Defaults reproduce a BERT-base-sized architecture. Special-token ids are
    forwarded to :class:`PretrainedConfig`. The mangled original declared every
    ``__init__`` parameter with the same name (a SyntaxError); the intended
    names are recovered from the attribute assignments in the body.
    """

    # Key used by PretrainedConfig serialization and AutoConfig dispatch.
    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    """ONNX export configuration for XLM-RoBERTa.

    The mangled original reused the config class's name for this class and
    named the property ``_A``; ``inputs`` is the attribute the OnnxConfig
    base class consumes.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice tasks carry an extra "choice" axis between batch
        # and sequence.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
72
"""simple docstring""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging __A : Any = logging.get_logger(__name__) __A : Optional[Any] = { "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json", # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class __lowerCAmelCase ( _UpperCamelCase): '''simple docstring''' __magic_name__ : Union[str, Any] = """gpt_neo""" __magic_name__ : Union[str, Any] = ["""past_key_values"""] __magic_name__ : Dict = {"""num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""} def __init__( self : Dict , UpperCamelCase__ : List[Any]=50257 , UpperCamelCase__ : Optional[Any]=2048 , UpperCamelCase__ : Tuple=2048 , UpperCamelCase__ : int=24 , UpperCamelCase__ : Dict=[[["global", "local"], 12]] , UpperCamelCase__ : Optional[Any]=16 , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : str=256 , UpperCamelCase__ : List[str]="gelu_new" , UpperCamelCase__ : List[str]=0.0 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : str=0.0 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : List[str]=1E-5 , UpperCamelCase__ : Any=0.02 , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Optional[Any]=50256 , UpperCamelCase__ : List[str]=50256 , **UpperCamelCase__ : str , ): A__ : Optional[Any] =vocab_size A__ : Dict =max_position_embeddings A__ : List[str] =hidden_size A__ : List[Any] =num_layers A__ : Tuple =num_heads A__ : List[str] =intermediate_size A__ : Tuple =window_size A__ : Dict =activation_function A__ : str =resid_dropout A__ : Union[str, Any] =embed_dropout A__ : List[str] =attention_dropout A__ : Tuple =classifier_dropout A__ : int =layer_norm_epsilon A__ : int =initializer_range A__ : str =use_cache A__ : Tuple =bos_token_id A__ 
: int =eos_token_id A__ : int =attention_types A__ : Any =self.expand_attention_types_params(UpperCamelCase__ ) if len(self.attention_layers ) != self.num_layers: raise ValueError( "Configuration for convolutional module is incorrect. " "It is required that `len(config.attention_layers)` == `config.num_layers` " F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, ''' F'''`config.num_layers = {self.num_layers}`. ''' "`config.attention_layers` is prepared using `config.attention_types`. " "Please verify the value of `config.attention_types` argument." ) super().__init__(bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ ) @staticmethod def _UpperCAmelCase ( UpperCamelCase__ : List[str] ): A__ : Optional[Any] =[] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def lowercase ( UpperCamelCase : List[str] , UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] ): """simple docstring""" import torch A__ : List[str] =input.size() A__ : Dict =len(UpperCamelCase ) A__ : Optional[int] =shape[dimension] A__ : str =torch.arange(0 , UpperCamelCase , UpperCamelCase ) A__ : Optional[int] =torch.div(sizedim - size , UpperCamelCase , rounding_mode="floor" ) + 1 A__ : str =torch.arange(UpperCamelCase ) + low_indices[:min_length][:, None] A__ : Tuple =[slice(UpperCamelCase )] * rank A__ : int =indices A__ : Optional[int] =input[s] A__ : Union[str, Any] =list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(UpperCamelCase ) def lowercase ( UpperCamelCase : str , UpperCamelCase : Any ): """simple docstring""" import torch A__ : List[str] =torch.arange(1 , UpperCamelCase ) A__ : List[Any] =torch.remainder(UpperCamelCase , UpperCamelCase ) A__ : Optional[int] =remainders == 0 A__ : str =candidates[divisor_indices] A__ : int =torch.max(UpperCamelCase ) return largest_divisor, torch.div(UpperCamelCase , 
UpperCamelCase , rounding_mode="floor" ) class __lowerCAmelCase ( _UpperCamelCase): '''simple docstring''' @property def _UpperCAmelCase ( self : List[Any] ): A__ : Optional[int] =OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} ) if self.use_past: self.fill_with_past_key_values_(UpperCamelCase__ , direction="inputs" ) A__ : Optional[int] ={0: "batch", 1: "past_sequence + sequence"} else: A__ : Tuple ={0: "batch", 1: "sequence"} return common_inputs @property def _UpperCAmelCase ( self : List[str] ): return self._config.num_heads def _UpperCAmelCase ( self : int , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ): A__ : Union[str, Any] =super(UpperCamelCase__ , self ).generate_dummy_inputs( UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ ) # We need to order the input in the way they appears in the forward() A__ : List[Any] =OrderedDict({"input_ids": common_inputs["input_ids"]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch A__ , A__ : Union[str, Any] =common_inputs["input_ids"].shape # Not using the same length for past_key_values A__ : Union[str, Any] =seqlen + 2 A__ : List[Any] =( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) A__ : Optional[Any] =[ (torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) for _ in range(self.num_layers ) ] A__ : Optional[Any] =common_inputs["attention_mask"] if self.use_past: A__ : Any =ordered_inputs["attention_mask"].dtype A__ : Tuple =torch.cat( [ordered_inputs["attention_mask"], torch.ones(UpperCamelCase__ , UpperCamelCase__ , dtype=UpperCamelCase__ )] , dim=1 ) return ordered_inputs @property def _UpperCAmelCase ( self : List[str] ): return 13
656
0
import math

import flax.linen as nn
import jax.numpy as jnp


def get_sinusoidal_embeddings(
    timesteps,
    embedding_dim,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
):
    """Return sinusoidal timestep embeddings of shape (len(timesteps), embedding_dim).

    The mangled original declared every parameter with the same name (a
    SyntaxError) and named the function differently from the name the modules
    below call; the name used by the callers is restored.

    Args:
        timesteps: 1-D array of timestep values.
        embedding_dim: Output embedding width; must be even.
        freq_shift: Shift applied to the timescale denominator.
        min_timescale / max_timescale: Frequency range of the sinusoids.
        flip_sin_to_cos: If True, emit [cos, sin] instead of [sin, cos].
        scale: Multiplier applied to the raw phase values.
    """
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    """Two-layer MLP (Dense -> SiLU -> Dense) applied to timestep embeddings.

    The mangled original declared both dataclass fields with the same name;
    the intended fields are recovered from their uses in ``__call__``.
    """

    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    """Wraps ``get_sinusoidal_embeddings`` as a flax module."""

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
73
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A : Union[str, Any] = logging.get_logger(__name__) __A : Any = { # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class __lowerCAmelCase ( _UpperCamelCase): '''simple docstring''' __magic_name__ : Tuple = """megatron-bert""" def __init__( self : Tuple , UpperCamelCase__ : Dict=29056 , UpperCamelCase__ : int=1024 , UpperCamelCase__ : Optional[int]=24 , UpperCamelCase__ : Dict=16 , UpperCamelCase__ : int=4096 , UpperCamelCase__ : str="gelu" , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : int=512 , UpperCamelCase__ : str=2 , UpperCamelCase__ : Union[str, Any]=0.02 , UpperCamelCase__ : Any=1E-12 , UpperCamelCase__ : List[Any]=0 , UpperCamelCase__ : str="absolute" , UpperCamelCase__ : Dict=True , **UpperCamelCase__ : Tuple , ): super().__init__(pad_token_id=UpperCamelCase__ , **UpperCamelCase__ ) A__ : Optional[int] =vocab_size A__ : Optional[int] =hidden_size A__ : str =num_hidden_layers A__ : Any =num_attention_heads A__ : str =hidden_act A__ : Optional[int] =intermediate_size A__ : str =hidden_dropout_prob A__ : str =attention_probs_dropout_prob A__ : List[Any] =max_position_embeddings A__ : List[Any] =type_vocab_size A__ : Tuple =initializer_range A__ : Any =layer_norm_eps A__ : Any =position_embedding_type A__ : Union[str, Any] =use_cache
656
0
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowercase_ = logging.get_logger(__name__) class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = ['''pixel_values'''] def __init__( self : Optional[Any] , _A : bool = True , _A : Dict[str, int] = None , _A : float = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : Dict , ): """simple docstring""" super().__init__(**_A ) __SCREAMING_SNAKE_CASE : Tuple = size if size is not None else {'''shortest_edge''': 384} __SCREAMING_SNAKE_CASE : int = get_size_dict(_A , default_to_square=_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = do_resize __SCREAMING_SNAKE_CASE : Tuple = size # Default value set here for backwards compatibility where the value in config is None __SCREAMING_SNAKE_CASE : Any = crop_pct if crop_pct is not None else 224 / 256 __SCREAMING_SNAKE_CASE : List[Any] = resample __SCREAMING_SNAKE_CASE : Tuple = do_rescale __SCREAMING_SNAKE_CASE : Optional[int] = rescale_factor __SCREAMING_SNAKE_CASE : Union[str, Any] = do_normalize __SCREAMING_SNAKE_CASE : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __SCREAMING_SNAKE_CASE : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD def UpperCAmelCase__ ( self : Union[str, Any] , _A : np.ndarray 
, _A : Dict[str, int] , _A : float , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : str , ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = get_size_dict(_A , default_to_square=_A ) if "shortest_edge" not in size: raise ValueError(F'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' ) __SCREAMING_SNAKE_CASE : Tuple = size['''shortest_edge'''] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct __SCREAMING_SNAKE_CASE : int = int(shortest_edge / crop_pct ) __SCREAMING_SNAKE_CASE : List[Any] = get_resize_output_image_size(_A , size=_A , default_to_square=_A ) __SCREAMING_SNAKE_CASE : Dict = resize(image=_A , size=_A , resample=_A , data_format=_A , **_A ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=_A , size=(shortest_edge, shortest_edge) , data_format=_A , **_A ) else: # warping (no cropping) when evaluated at 384 or larger return resize( _A , size=(shortest_edge, shortest_edge) , resample=_A , data_format=_A , **_A ) def UpperCAmelCase__ ( self : str , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ): """simple docstring""" return rescale(_A , scale=_A , data_format=_A , **_A ) def UpperCAmelCase__ ( self : Dict , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Union[str, Any] , ): """simple docstring""" return normalize(_A , mean=_A , std=_A , data_format=_A , **_A ) def UpperCAmelCase__ ( self : Tuple , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : float = None , _A : PILImageResampling = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : 
ChannelDimension = ChannelDimension.FIRST , **_A : int , ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = do_resize if do_resize is not None else self.do_resize __SCREAMING_SNAKE_CASE : Dict = crop_pct if crop_pct is not None else self.crop_pct __SCREAMING_SNAKE_CASE : str = resample if resample is not None else self.resample __SCREAMING_SNAKE_CASE : List[Any] = do_rescale if do_rescale is not None else self.do_rescale __SCREAMING_SNAKE_CASE : Any = rescale_factor if rescale_factor is not None else self.rescale_factor __SCREAMING_SNAKE_CASE : List[str] = do_normalize if do_normalize is not None else self.do_normalize __SCREAMING_SNAKE_CASE : str = image_mean if image_mean is not None else self.image_mean __SCREAMING_SNAKE_CASE : Optional[int] = image_std if image_std is not None else self.image_std __SCREAMING_SNAKE_CASE : List[str] = size if size is not None else self.size __SCREAMING_SNAKE_CASE : Tuple = get_size_dict(_A , default_to_square=_A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = make_list_of_images(_A ) if not valid_images(_A ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_resize and size["shortest_edge"] < 384 and crop_pct is None: raise ValueError('''crop_pct must be specified if size < 384.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. 
__SCREAMING_SNAKE_CASE : Optional[int] = [to_numpy_array(_A ) for image in images] if do_resize: __SCREAMING_SNAKE_CASE : Tuple = [self.resize(image=_A , size=_A , crop_pct=_A , resample=_A ) for image in images] if do_rescale: __SCREAMING_SNAKE_CASE : int = [self.rescale(image=_A , scale=_A ) for image in images] if do_normalize: __SCREAMING_SNAKE_CASE : List[str] = [self.normalize(image=_A , mean=_A , std=_A ) for image in images] __SCREAMING_SNAKE_CASE : Tuple = [to_channel_dimension_format(_A , _A ) for image in images] __SCREAMING_SNAKE_CASE : Tuple = {'''pixel_values''': images} return BatchFeature(data=_A , tensor_type=_A )
74
"""simple docstring""" from __future__ import annotations def lowercase ( UpperCamelCase : list[float] ): """simple docstring""" if len(UpperCamelCase ) < 2: raise ValueError("Monogons and Digons are not polygons in the Euclidean space" ) if any(i <= 0 for i in nums ): raise ValueError("All values must be greater than 0" ) A__ : Union[str, Any] =nums.copy() copy_nums.sort() return copy_nums[-1] < sum(copy_nums[:-1] ) if __name__ == "__main__": import doctest doctest.testmod()
656
0
'''simple docstring''' import enum import warnings from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING UpperCamelCase__ = logging.get_logger(__name__) class lowerCamelCase_ ( enum.Enum ): lowerCAmelCase__ = 0 lowerCAmelCase__ = 1 @add_end_docstrings(__a ) class lowerCamelCase_ ( __a ): lowerCAmelCase__ = 'generated' def __init__( self : List[str] , *_A : Optional[Any] , **_A : List[str] ): '''simple docstring''' super().__init__(*_A , **_A ) self.check_model_type( TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING ) def lowercase_ ( self : Any , _A : List[Any]=None , _A : List[Any]=None , _A : int=None , _A : Any=None , _A : Union[str, Any]=None , _A : str=None , **_A : List[str] , ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = {} if truncation is not None: UpperCAmelCase__ : Union[str, Any] = truncation UpperCAmelCase__ : Any = generate_kwargs UpperCAmelCase__ : Optional[Any] = {} if return_tensors is not None and return_type is None: UpperCAmelCase__ : Any = ReturnType.TENSORS if return_tensors else ReturnType.TEXT if return_type is not None: UpperCAmelCase__ : Tuple = return_type if clean_up_tokenization_spaces is not None: UpperCAmelCase__ : List[str] = clean_up_tokenization_spaces if stop_sequence is not None: UpperCAmelCase__ : Any = self.tokenizer.encode(_A , add_special_tokens=_A ) if len(_A ) > 1: warnings.warn( '''Stopping on a multiple token sequence is not yet supported on transformers. 
The first token of''' ''' the stop sequence will be used as the stop sequence string in the interim.''' ) UpperCAmelCase__ : Union[str, Any] = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def lowercase_ ( self : List[str] , _A : int , _A : int , _A : int ): '''simple docstring''' return True def lowercase_ ( self : List[str] , *_A : List[Any] , _A : List[str] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.model.config.prefix if self.model.config.prefix is not None else '''''' if isinstance(args[0] , _A ): if self.tokenizer.pad_token_id is None: raise ValueError('''Please make sure that the tokenizer has a pad_token_id when using a batch input''' ) UpperCAmelCase__ : Tuple = ([prefix + arg for arg in args[0]],) UpperCAmelCase__ : Dict = True elif isinstance(args[0] , _A ): UpperCAmelCase__ : List[str] = (prefix + args[0],) UpperCAmelCase__ : Dict = False else: raise ValueError( f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" ) UpperCAmelCase__ : List[str] = self.tokenizer(*_A , padding=_A , truncation=_A , return_tensors=self.framework ) # This is produced by tokenizers but is an invalid generate kwargs if "token_type_ids" in inputs: del inputs["token_type_ids"] return inputs def __call__( self : int , *_A : int , **_A : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = super().__call__(*_A , **_A ) if ( isinstance(args[0] , _A ) and all(isinstance(_A , _A ) for el in args[0] ) and all(len(_A ) == 1 for res in result ) ): return [res[0] for res in result] return result def lowercase_ ( self : Union[str, Any] , _A : List[Any] , _A : Union[str, Any]=TruncationStrategy.DO_NOT_TRUNCATE , **_A : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : str = self._parse_and_tokenize(_A , truncation=_A , **_A ) return inputs def lowercase_ ( self : Tuple , _A : str , **_A : Any ): '''simple docstring''' if self.framework == "pt": 
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = model_inputs['''input_ids'''].shape elif self.framework == "tf": UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = tf.shape(model_inputs['''input_ids'''] ).numpy() UpperCAmelCase__ : str = generate_kwargs.get('''min_length''' , self.model.config.min_length ) UpperCAmelCase__ : Optional[Any] = generate_kwargs.get('''max_length''' , self.model.config.max_length ) self.check_inputs(_A , generate_kwargs['''min_length'''] , generate_kwargs['''max_length'''] ) UpperCAmelCase__ : int = self.model.generate(**_A , **_A ) UpperCAmelCase__ : List[Any] = output_ids.shape[0] if self.framework == "pt": UpperCAmelCase__ : str = output_ids.reshape(_A , out_b // in_b , *output_ids.shape[1:] ) elif self.framework == "tf": UpperCAmelCase__ : Union[str, Any] = tf.reshape(_A , (in_b, out_b // in_b, *output_ids.shape[1:]) ) return {"output_ids": output_ids} def lowercase_ ( self : Union[str, Any] , _A : Any , _A : Any=ReturnType.TEXT , _A : Optional[Any]=False ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = [] for output_ids in model_outputs["output_ids"][0]: if return_type == ReturnType.TENSORS: UpperCAmelCase__ : Any = {f"""{self.return_name}_token_ids""": output_ids} elif return_type == ReturnType.TEXT: UpperCAmelCase__ : List[str] = { f"""{self.return_name}_text""": self.tokenizer.decode( _A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , ) } records.append(_A ) return records @add_end_docstrings(__a ) class lowerCamelCase_ ( __a ): lowerCAmelCase__ = 'summary' def __call__( self : Tuple , *_A : Optional[int] , **_A : Optional[int] ): '''simple docstring''' return super().__call__(*_A , **_A ) def lowercase_ ( self : Optional[Any] , _A : int , _A : int , _A : int ): '''simple docstring''' if max_length < min_length: logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" ) if input_length < max_length: logger.warning( f"""Your max_length is set to 
{max_length}, but your input_length is only {input_length}. Since this is """ '''a summarization task, where outputs shorter than the input are typically wanted, you might ''' f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" ) @add_end_docstrings(__a ) class lowerCamelCase_ ( __a ): lowerCAmelCase__ = 'translation' def lowercase_ ( self : Tuple , _A : int , _A : int , _A : int ): '''simple docstring''' if input_length > 0.9 * max_length: logger.warning( f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """ '''increasing your max_length manually, e.g. translator(\'...\', max_length=400)''' ) return True def lowercase_ ( self : List[Any] , *_A : Any , _A : Dict=TruncationStrategy.DO_NOT_TRUNCATE , _A : str=None , _A : Any=None ): '''simple docstring''' if getattr(self.tokenizer , '''_build_translation_inputs''' , _A ): return self.tokenizer._build_translation_inputs( *_A , return_tensors=self.framework , truncation=_A , src_lang=_A , tgt_lang=_A ) else: return super()._parse_and_tokenize(*_A , truncation=_A ) def lowercase_ ( self : Union[str, Any] , _A : Optional[Any]=None , _A : Optional[int]=None , **_A : Dict ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Dict = super()._sanitize_parameters(**_A ) if src_lang is not None: UpperCAmelCase__ : int = src_lang if tgt_lang is not None: UpperCAmelCase__ : Union[str, Any] = tgt_lang if src_lang is None and tgt_lang is None: # Backward compatibility, direct arguments use is preferred. 
UpperCAmelCase__ : List[Any] = kwargs.get('''task''' , self.task ) UpperCAmelCase__ : int = task.split('''_''' ) if task and len(_A ) == 4: # translation, XX, to YY UpperCAmelCase__ : Any = items[1] UpperCAmelCase__ : Optional[int] = items[3] return preprocess_params, forward_params, postprocess_params def __call__( self : Union[str, Any] , *_A : int , **_A : Union[str, Any] ): '''simple docstring''' return super().__call__(*_A , **_A )
75
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) __A : Optional[Any] = { "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Any = [ "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST", "MegaForCausalLM", "MegaForMaskedLM", "MegaForMultipleChoice", "MegaForQuestionAnswering", "MegaForSequenceClassification", "MegaForTokenClassification", "MegaModel", "MegaPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mega import ( MEGA_PRETRAINED_MODEL_ARCHIVE_LIST, MegaForCausalLM, MegaForMaskedLM, MegaForMultipleChoice, MegaForQuestionAnswering, MegaForSequenceClassification, MegaForTokenClassification, MegaModel, MegaPreTrainedModel, ) else: import sys __A : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
656
0
"""simple docstring""" import os from collections import namedtuple import pytest from datasets import ClassLabel, Features, Sequence, Value from datasets.commands.test import TestCommand from datasets.info import DatasetInfo, DatasetInfosDict a_ = namedtuple( '_TestCommandArgs', [ 'dataset', 'name', 'cache_dir', 'data_dir', 'all_configs', 'save_infos', 'ignore_verifications', 'force_redownload', 'clear_cache', ], defaults=[None, None, None, False, False, False, False, False], ) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): return (abs(source - target ) / target) < 0.01 @pytest.mark.integration def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : List[Any] = _TestCommandArgs(dataset=__UpperCamelCase , all_configs=__UpperCamelCase , save_infos=__UpperCamelCase ) __lowercase : Dict = TestCommand(*__UpperCamelCase ) test_command.run() __lowercase : Optional[Any] = os.path.join(__UpperCamelCase , '''README.md''' ) assert os.path.exists(__UpperCamelCase ) __lowercase : Optional[int] = DatasetInfosDict.from_directory(__UpperCamelCase ) __lowercase : List[Any] = DatasetInfosDict( { '''default''': DatasetInfo( features=Features( { '''tokens''': Sequence(Value('''string''' ) ), '''ner_tags''': Sequence( ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ), '''langs''': Sequence(Value('''string''' ) ), '''spans''': Sequence(Value('''string''' ) ), } ) , splits=[ { '''name''': '''train''', '''num_bytes''': 2_35_15_63, '''num_examples''': 1_00_00, }, { '''name''': '''validation''', '''num_bytes''': 23_84_18, '''num_examples''': 10_00, }, ] , download_size=3_94_06_80 , dataset_size=2_58_99_81 , ) } ) assert dataset_infos.keys() == expected_dataset_infos.keys() for key in DatasetInfo._INCLUDED_INFO_IN_YAML: __lowercase ,__lowercase : Optional[Any] = getattr(dataset_infos['''default'''] , __UpperCamelCase ), getattr(expected_dataset_infos['''default'''] , __UpperCamelCase ) if key == "num_bytes": assert 
is_apercent_close(__UpperCamelCase , __UpperCamelCase ) elif key == "splits": assert list(__UpperCamelCase ) == list(__UpperCamelCase ) for split in result: assert result[split].name == expected[split].name assert result[split].num_examples == expected[split].num_examples assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes ) else: result == expected
76
"""simple docstring""" def lowercase ( UpperCamelCase : int ): """simple docstring""" if num <= 0: raise ValueError("Input must be a positive integer" ) A__ : Union[str, Any] =[True] * (num + 1) A__ : Union[str, Any] =2 while p * p <= num: if primes[p]: for i in range(p * p , num + 1 , UpperCamelCase ): A__ : str =False p += 1 return [prime for prime in range(2 , num + 1 ) if primes[prime]] if __name__ == "__main__": import doctest doctest.testmod() __A : Optional[int] = int(input("Enter a positive integer: ").strip()) print(prime_sieve_eratosthenes(user_num))
656
0
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import OwlViTImageProcessor, OwlViTProcessor @require_vision class a__ ( unittest.TestCase ): def a_ ( self : Optional[int]): """simple docstring""" __UpperCAmelCase : str = tempfile.mkdtemp() # fmt: off __UpperCAmelCase : List[Any] = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: on __UpperCAmelCase : Union[str, Any] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_)))) __UpperCAmelCase : Dict = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""] __UpperCAmelCase : str = {"unk_token": "<unk>"} __UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"]) __UpperCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file , "w" , encoding="utf-8") as fp: fp.write(json.dumps(UpperCamelCase_) + "\n") with open(self.merges_file , "w" , encoding="utf-8") as fp: fp.write("\n".join(UpperCamelCase_)) __UpperCAmelCase : str = { "do_resize": True, "size": 20, "do_center_crop": True, "crop_size": 18, "do_normalize": True, "image_mean": [0.48145466, 0.4578275, 0.40821073], "image_std": [0.26862954, 0.26130258, 0.27577711], } __UpperCAmelCase : List[Any] = os.path.join(self.tmpdirname , UpperCamelCase_) with open(self.image_processor_file , "w" , encoding="utf-8") as fp: json.dump(UpperCamelCase_ , UpperCamelCase_) def a_ ( self : Any , **UpperCamelCase_ : Dict): """simple docstring""" return 
CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="!" , **UpperCamelCase_) def a_ ( self : Union[str, Any] , **UpperCamelCase_ : List[Any]): """simple docstring""" return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="!" , **UpperCamelCase_) def a_ ( self : Tuple , **UpperCamelCase_ : Union[str, Any]): """simple docstring""" return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_) def a_ ( self : Union[str, Any]): """simple docstring""" shutil.rmtree(self.tmpdirname) def a_ ( self : str): """simple docstring""" __UpperCAmelCase : Dict = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)] __UpperCAmelCase : List[str] = [Image.fromarray(np.moveaxis(UpperCamelCase_ , 0 , -1)) for x in image_inputs] return image_inputs def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : int = self.get_tokenizer() __UpperCAmelCase : int = self.get_rust_tokenizer() __UpperCAmelCase : int = self.get_image_processor() __UpperCAmelCase : Tuple = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_) processor_slow.save_pretrained(self.tmpdirname) __UpperCAmelCase : Union[str, Any] = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase_) __UpperCAmelCase : List[Any] = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_) processor_fast.save_pretrained(self.tmpdirname) __UpperCAmelCase : Optional[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab()) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab()) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab()) self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase_) self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase_) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string()) 
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string()) self.assertIsInstance(processor_slow.image_processor , UpperCamelCase_) self.assertIsInstance(processor_fast.image_processor , UpperCamelCase_) def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : List[Any] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) __UpperCAmelCase : Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)") __UpperCAmelCase : str = self.get_image_processor(do_normalize=UpperCamelCase_) __UpperCAmelCase : Any = OwlViTProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=UpperCamelCase_) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer , UpperCamelCase_) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor , UpperCamelCase_) def a_ ( self : int): """simple docstring""" __UpperCAmelCase : int = self.get_image_processor() __UpperCAmelCase : List[str] = self.get_tokenizer() __UpperCAmelCase : Optional[int] = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_) __UpperCAmelCase : str = self.prepare_image_inputs() __UpperCAmelCase : Union[str, Any] = image_processor(UpperCamelCase_ , return_tensors="np") __UpperCAmelCase : List[str] = processor(images=UpperCamelCase_ , return_tensors="np") for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2) def a_ ( self : Optional[int]): """simple docstring""" __UpperCAmelCase : Optional[int] = self.get_image_processor() __UpperCAmelCase : Union[str, Any] = self.get_tokenizer() __UpperCAmelCase : str = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_) 
__UpperCAmelCase : Tuple = "lower newer" __UpperCAmelCase : List[Any] = processor(text=UpperCamelCase_ , return_tensors="np") __UpperCAmelCase : Any = tokenizer(UpperCamelCase_ , return_tensors="np") for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist()) def a_ ( self : int): """simple docstring""" __UpperCAmelCase : List[Any] = self.get_image_processor() __UpperCAmelCase : int = self.get_tokenizer() __UpperCAmelCase : Tuple = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_) __UpperCAmelCase : Union[str, Any] = "lower newer" __UpperCAmelCase : str = self.prepare_image_inputs() __UpperCAmelCase : Any = processor(text=UpperCamelCase_ , images=UpperCamelCase_) self.assertListEqual(list(inputs.keys()) , ["input_ids", "attention_mask", "pixel_values"]) # test if it raises when no input is passed with pytest.raises(UpperCamelCase_): processor() def a_ ( self : str): """simple docstring""" __UpperCAmelCase : Dict = "google/owlvit-base-patch32" __UpperCAmelCase : str = OwlViTProcessor.from_pretrained(UpperCamelCase_) __UpperCAmelCase : int = ["cat", "nasa badge"] __UpperCAmelCase : Dict = processor(text=UpperCamelCase_) __UpperCAmelCase : List[Any] = 16 self.assertListEqual(list(inputs.keys()) , ["input_ids", "attention_mask"]) self.assertEqual(inputs["input_ids"].shape , (2, seq_length)) # test if it raises when no input is passed with pytest.raises(UpperCamelCase_): processor() def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase : List[Any] = "google/owlvit-base-patch32" __UpperCAmelCase : Tuple = OwlViTProcessor.from_pretrained(UpperCamelCase_) __UpperCAmelCase : Optional[int] = [["cat", "nasa badge"], ["person"]] __UpperCAmelCase : List[str] = processor(text=UpperCamelCase_) __UpperCAmelCase : List[str] = 16 __UpperCAmelCase : str = len(UpperCamelCase_) __UpperCAmelCase : List[str] = max([len(UpperCamelCase_) for texts in input_texts]) 
self.assertListEqual(list(inputs.keys()) , ["input_ids", "attention_mask"]) self.assertEqual(inputs["input_ids"].shape , (batch_size * num_max_text_queries, seq_length)) # test if it raises when no input is passed with pytest.raises(UpperCamelCase_): processor() def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : Tuple = "google/owlvit-base-patch32" __UpperCAmelCase : Dict = OwlViTProcessor.from_pretrained(UpperCamelCase_) __UpperCAmelCase : Any = ["cat", "nasa badge"] __UpperCAmelCase : Optional[int] = processor(text=UpperCamelCase_) __UpperCAmelCase : Optional[int] = 16 __UpperCAmelCase : Optional[Any] = inputs["input_ids"] __UpperCAmelCase : Any = [ [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] self.assertListEqual(list(inputs.keys()) , ["input_ids", "attention_mask"]) self.assertEqual(inputs["input_ids"].shape , (2, seq_length)) self.assertListEqual(list(input_ids[0]) , predicted_ids[0]) self.assertListEqual(list(input_ids[1]) , predicted_ids[1]) def a_ ( self : int): """simple docstring""" __UpperCAmelCase : Any = self.get_image_processor() __UpperCAmelCase : Any = self.get_tokenizer() __UpperCAmelCase : Tuple = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_) __UpperCAmelCase : Any = self.prepare_image_inputs() __UpperCAmelCase : List[Any] = self.prepare_image_inputs() __UpperCAmelCase : Optional[Any] = processor(images=UpperCamelCase_ , query_images=UpperCamelCase_) self.assertListEqual(list(inputs.keys()) , ["query_pixel_values", "pixel_values"]) # test if it raises when no input is passed with pytest.raises(UpperCamelCase_): processor() def a_ ( self : Optional[int]): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.get_image_processor() __UpperCAmelCase : str = self.get_tokenizer() __UpperCAmelCase : str = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_) __UpperCAmelCase : Tuple = [[1, 4, 
5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __UpperCAmelCase : List[str] = processor.batch_decode(UpperCamelCase_) __UpperCAmelCase : Optional[int] = tokenizer.batch_decode(UpperCamelCase_) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_)
77
"""simple docstring""" import pickle import unittest import torch from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils import require_cpu @require_cpu class __lowerCAmelCase ( unittest.TestCase): '''simple docstring''' def _UpperCAmelCase ( self : List[Any] ): A__ : Tuple =torch.nn.Linear(10 , 10 ) A__ : List[str] =torch.optim.SGD(model.parameters() , 0.1 ) A__ : Union[str, Any] =Accelerator() A__ : str =accelerator.prepare(UpperCamelCase__ ) try: pickle.loads(pickle.dumps(UpperCamelCase__ ) ) except Exception as e: self.fail(F'''Accelerated optimizer pickling failed with {e}''' ) AcceleratorState._reset_state()
656
0
'''simple docstring''' def lowerCAmelCase_ ( snake_case_ : Optional[Any]=2_81_23 ) -> str: '''simple docstring''' UpperCAmelCase_ = [1] * (limit + 1) for i in range(2 , int(limit**0.5 ) + 1 ): sum_divs[i * i] += i for k in range(i + 1 , limit // i + 1 ): sum_divs[k * i] += k + i UpperCAmelCase_ = set() UpperCAmelCase_ = 0 for n in range(1 , limit + 1 ): if sum_divs[n] > n: abundants.add(snake_case_ ) if not any((n - a in abundants) for a in abundants ): res += n return res if __name__ == "__main__": print(solution())
78
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_big_bird import BigBirdTokenizer else: __A : Optional[int] = None __A : Union[str, Any] = logging.get_logger(__name__) __A : List[Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} __A : str = { "vocab_file": { "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model", "google/bigbird-roberta-large": ( "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model" ), "google/bigbird-base-trivia-itc": ( "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model" ), }, "tokenizer_file": { "google/bigbird-roberta-base": ( "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json" ), "google/bigbird-roberta-large": ( "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json" ), "google/bigbird-base-trivia-itc": ( "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json" ), }, } __A : List[str] = { "google/bigbird-roberta-base": 4_096, "google/bigbird-roberta-large": 4_096, "google/bigbird-base-trivia-itc": 4_096, } __A : Tuple = "▁" class __lowerCAmelCase ( _UpperCamelCase): '''simple docstring''' __magic_name__ : Dict = VOCAB_FILES_NAMES __magic_name__ : Any = PRETRAINED_VOCAB_FILES_MAP __magic_name__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ : List[Any] = BigBirdTokenizer __magic_name__ : Any = ["""input_ids""", """attention_mask"""] __magic_name__ : List[int] = [] def __init__( self : str , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Union[str, Any]="<unk>" , UpperCamelCase__ : 
str="<s>" , UpperCamelCase__ : int="</s>" , UpperCamelCase__ : Optional[int]="<pad>" , UpperCamelCase__ : Optional[Any]="[SEP]" , UpperCamelCase__ : List[Any]="[MASK]" , UpperCamelCase__ : str="[CLS]" , **UpperCamelCase__ : List[Any] , ): A__ : Optional[int] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else bos_token A__ : Optional[Any] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else eos_token A__ : Optional[int] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else unk_token A__ : int =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else pad_token A__ : str =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else cls_token A__ : List[Any] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it A__ : str =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token super().__init__( UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , **UpperCamelCase__ , ) A__ : List[Any] =vocab_file A__ : Optional[int] =False if not self.vocab_file else True def _UpperCAmelCase ( self : str , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ): A__ : Tuple =[self.sep_token_id] A__ : str =[self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." 
) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase__ )) + [1] return [1] + ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) + [1] def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ): A__ : Tuple =[self.sep_token_id] A__ : Dict =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _UpperCAmelCase ( self : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(UpperCamelCase__ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return A__ : List[str] =os.path.join( UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ): copyfile(self.vocab_file , UpperCamelCase__ ) return (out_vocab_file,)
656
0
from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Optional[Any] = { """linear""": get_linear_schedule_with_warmup, """cosine""": get_cosine_schedule_with_warmup, """cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup, """polynomial""": get_polynomial_decay_schedule_with_warmup, """constant""": get_constant_schedule, """constant_w_warmup""": get_constant_schedule_with_warmup, } class UpperCAmelCase_ ( __lowerCamelCase ): def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , *_lowerCAmelCase , **_lowerCAmelCase ): super().__init__(*_lowerCAmelCase , **_lowerCAmelCase ) if config is None: assert isinstance(self.model , _lowerCAmelCase ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" f" {self.model.__class__}" ) UpperCAmelCase__ : Union[str, Any] = self.model.config else: UpperCAmelCase__ : Optional[Any] = config UpperCAmelCase__ : Dict = data_args UpperCAmelCase__ : Dict = self.config.tgt_vocab_size if isinstance(self.config , _lowerCAmelCase ) else self.config.vocab_size if self.args.label_smoothing != 0 or 
(self.data_args is not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for" """ padding..""" ) if self.args.label_smoothing == 0: UpperCAmelCase__ : Dict = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss UpperCAmelCase__ : List[str] = label_smoothed_nll_loss def __UpperCAmelCase ( self , _lowerCAmelCase ): if self.optimizer is None: UpperCAmelCase__ : Tuple = ["""bias""", """LayerNorm.weight"""] UpperCAmelCase__ : str = [ { """params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], """weight_decay""": self.args.weight_decay, }, { """params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], """weight_decay""": 0.0, }, ] UpperCAmelCase__ : Dict = Adafactor if self.args.adafactor else AdamW if self.args.adafactor: UpperCAmelCase__ : Tuple = Adafactor UpperCAmelCase__ : List[str] = {"""scale_parameter""": False, """relative_step""": False} else: UpperCAmelCase__ : List[Any] = AdamW UpperCAmelCase__ : Tuple = { """betas""": (self.args.adam_betaa, self.args.adam_betaa), """eps""": self.args.adam_epsilon, } UpperCAmelCase__ : Optional[Any] = self.args.learning_rate if self.sharded_ddp: UpperCAmelCase__ : List[str] = OSS( params=_lowerCAmelCase , optim=_lowerCAmelCase , **_lowerCAmelCase , ) else: UpperCAmelCase__ : Optional[int] = optimizer_cls(_lowerCAmelCase , **_lowerCAmelCase ) if self.lr_scheduler is None: UpperCAmelCase__ : str = self._get_lr_scheduler(_lowerCAmelCase ) else: # ignoring --lr_scheduler 
logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" ) def __UpperCAmelCase ( self , _lowerCAmelCase ): UpperCAmelCase__ : Dict = arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": UpperCAmelCase__ : str = schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": UpperCAmelCase__ : List[Any] = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps ) else: UpperCAmelCase__ : List[Any] = schedule_func( self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=_lowerCAmelCase ) return scheduler def __UpperCAmelCase ( self ): if isinstance(self.train_dataset , torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token UpperCAmelCase__ : Dict = model(**_lowerCAmelCase , use_cache=_lowerCAmelCase )[0] UpperCAmelCase__ : str = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) ) else: # compute usual loss via models UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = model(**_lowerCAmelCase , labels=_lowerCAmelCase , use_cache=_lowerCAmelCase )[:2] else: # compute label smoothed loss UpperCAmelCase__ : int = model(**_lowerCAmelCase , use_cache=_lowerCAmelCase )[0] UpperCAmelCase__ : str = torch.nn.functional.log_softmax(_lowerCAmelCase , dim=-1 ) UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.loss_fn(_lowerCAmelCase , 
_lowerCAmelCase , self.args.label_smoothing , ignore_index=self.config.pad_token_id ) return loss, logits def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase ): UpperCAmelCase__ : Optional[int] = inputs.pop("""labels""" ) UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self._compute_loss(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) return loss def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , ): UpperCAmelCase__ : Any = self._prepare_inputs(_lowerCAmelCase ) UpperCAmelCase__ : int = { """max_length""": self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, """num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: UpperCAmelCase__ : Any = self.model.generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **_lowerCAmelCase , ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: UpperCAmelCase__ : str = self._pad_tensors_to_max_len(_lowerCAmelCase , gen_kwargs["""max_length"""] ) UpperCAmelCase__ : Union[str, Any] = inputs.pop("""labels""" ) with torch.no_grad(): # compute loss on predict data UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self._compute_loss(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) UpperCAmelCase__ : str = loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) UpperCAmelCase__ : Optional[Any] = generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: UpperCAmelCase__ : Any = self._pad_tensors_to_max_len(_lowerCAmelCase , gen_kwargs["""max_length"""] ) return (loss, logits, labels) def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase ): # If PAD token is not defined at least EOS token has to be defined 
UpperCAmelCase__ : Optional[int] = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( """Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be""" f" padded to `max_length`={max_length}" ) UpperCAmelCase__ : Optional[Any] = pad_token_id * torch.ones( (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device ) UpperCAmelCase__ : List[str] = tensor return padded_tensor
79
"""simple docstring""" import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging __A : Optional[int] = logging.get_logger(__name__) __A : Optional[int] = {"vocab_file": "spiece.model"} __A : List[Any] = { "vocab_file": { "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model", } } class __lowerCAmelCase ( _UpperCamelCase): '''simple docstring''' def __init__( self : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any]=False , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Dict="<s>" , UpperCamelCase__ : str="</s>" , UpperCamelCase__ : Union[str, Any]="<unk>" , UpperCamelCase__ : Optional[int]="<sep>" , UpperCamelCase__ : Optional[int]="<pad>" , UpperCamelCase__ : Optional[int]="<cls>" , UpperCamelCase__ : List[str]="<mask>" , UpperCamelCase__ : Optional[Any]=["<eop>", "<eod>"] , UpperCamelCase__ : Optional[Dict[str, Any]] = None , **UpperCamelCase__ : Dict , ): A__ : List[str] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token A__ : Tuple ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=UpperCamelCase__ , remove_space=UpperCamelCase__ , keep_accents=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , ) A__ : Dict =3 A__ : int =do_lower_case A__ : str =remove_space A__ : Optional[Any] =keep_accents A__ : int =vocab_file A__ : Dict 
=spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCamelCase__ ) try: import jieba except ModuleNotFoundError as error: raise error.__class__( "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. " "See https://pypi.org/project/jieba/ for installation." ) A__ : Union[str, Any] =jieba A__ : List[str] =str.maketrans(" \n" , "\u2582\u2583" ) @property # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size def _UpperCAmelCase ( self : Union[str, Any] ): return len(self.sp_model ) def _UpperCAmelCase ( self : Optional[int] ): A__ : Any ={self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : List[str] ): A__ : Union[str, Any] =self.__dict__.copy() A__ : Tuple =None return state def __setstate__( self : Tuple , UpperCamelCase__ : int ): A__ : Union[str, Any] =d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): A__ : Optional[int] ={} A__ : Union[str, Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : Dict ): if self.remove_space: A__ : Optional[int] =" ".join(inputs.strip().split() ) else: A__ : Optional[Any] =inputs A__ : Any =outputs.replace("``" , "\"" ).replace("''" , "\"" ) if not self.keep_accents: A__ : Optional[Any] =unicodedata.normalize("NFKD" , UpperCamelCase__ ) A__ : Tuple ="".join([c for c in outputs if not unicodedata.combining(UpperCamelCase__ )] ) if self.do_lower_case: A__ : str =outputs.lower() return outputs def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : str ): A__ : Optional[int] =self.preprocess_text(UpperCamelCase__ ) A__ : Dict =self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ ) A__ : List[str] =[] for piece in pieces: if len(UpperCamelCase__ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit(): A__ : str 
=self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCamelCase__ , "" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: A__ : Union[str, Any] =cur_pieces[1:] else: A__ : List[str] =cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(UpperCamelCase__ ) else: new_pieces.append(UpperCamelCase__ ) return new_pieces def _UpperCAmelCase ( self : int , UpperCamelCase__ : str ): return self.sp_model.PieceToId(UpperCamelCase__ ) def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : List[Any] ): return self.sp_model.IdToPiece(UpperCamelCase__ ) def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : str ): A__ : Optional[int] ="".join(UpperCamelCase__ ).replace(UpperCamelCase__ , " " ).strip() return out_string def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ): A__ : List[str] =[self.sep_token_id] A__ : str =[self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ ) if token_ids_a is not None: return ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) + [1, 1] return ([0] * len(UpperCamelCase__ )) + [1, 1] def _UpperCAmelCase ( self : int , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ): A__ : List[str] =[self.sep_token_id] A__ : Optional[Any] =[2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : str , 
UpperCamelCase__ : Optional[str] = None ): if not os.path.isdir(UpperCamelCase__ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return A__ : Tuple =os.path.join( UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCamelCase__ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCamelCase__ , "wb" ) as fi: A__ : Optional[Any] =self.sp_model.serialized_model_proto() fi.write(UpperCamelCase__ ) return (out_vocab_file,) def _UpperCAmelCase ( self : str , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : int ): A__ : List[Any] =super()._decode(*UpperCamelCase__ , **UpperCamelCase__ ) A__ : Union[str, Any] =text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" ) return text
656
0
import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel __UpperCamelCase : Dict = { """text_branch""": """text_model""", """audio_branch""": """audio_model.audio_encoder""", """attn""": """attention.self""", """self.proj""": """output.dense""", """attention.self_mask""": """attn_mask""", """mlp.fc1""": """intermediate.dense""", """mlp.fc2""": """output.dense""", """norm1""": """layernorm_before""", """norm2""": """layernorm_after""", """bn0""": """batch_norm""", } __UpperCamelCase : Optional[int] = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""") def snake_case ( lowerCamelCase , lowerCamelCase=False ): '''simple docstring''' __lowercase , __lowercase = create_model( """HTSAT-tiny""" , """roberta""" , lowerCamelCase , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=lowerCamelCase , fusion_type="""aff_2d""" if enable_fusion else None , ) return model, model_cfg def snake_case ( lowerCamelCase ): '''simple docstring''' __lowercase = {} __lowercase = r""".*sequential.(\d+).*""" __lowercase = r""".*_projection.(\d+).*""" for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: __lowercase = key.replace(lowerCamelCase , lowerCamelCase ) if re.match(lowerCamelCase , lowerCamelCase ): # replace sequential layers with list __lowercase = re.match(lowerCamelCase , lowerCamelCase ).group(1 ) __lowercase = key.replace(F'sequential.{sequential_layer}.' , F'layers.{int(lowerCamelCase )//3}.linear.' ) elif re.match(lowerCamelCase , lowerCamelCase ): __lowercase = int(re.match(lowerCamelCase , lowerCamelCase ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... __lowercase = 1 if projecton_layer == 0 else 2 __lowercase = key.replace(F'_projection.{projecton_layer}.' 
, F'_projection.linear{transformers_projection_layer}.' ) if "audio" and "qkv" in key: # split qkv into query key and value __lowercase = value __lowercase = mixed_qkv.size(0 ) // 3 __lowercase = mixed_qkv[:qkv_dim] __lowercase = mixed_qkv[qkv_dim : qkv_dim * 2] __lowercase = mixed_qkv[qkv_dim * 2 :] __lowercase = query_layer __lowercase = key_layer __lowercase = value_layer else: __lowercase = value return model_state_dict def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=False ): '''simple docstring''' __lowercase , __lowercase = init_clap(lowerCamelCase , enable_fusion=lowerCamelCase ) clap_model.eval() __lowercase = clap_model.state_dict() __lowercase = rename_state_dict(lowerCamelCase ) __lowercase = ClapConfig() __lowercase = enable_fusion __lowercase = ClapModel(lowerCamelCase ) # ignore the spectrogram embedding layer model.load_state_dict(lowerCamelCase , strict=lowerCamelCase ) model.save_pretrained(lowerCamelCase ) transformers_config.save_pretrained(lowerCamelCase ) if __name__ == "__main__": __UpperCamelCase : List[str] = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""") __UpperCamelCase : Optional[Any] = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
80
"""simple docstring""" def lowercase ( UpperCamelCase : int , UpperCamelCase : list[int] , UpperCamelCase : int ): """simple docstring""" def count_of_possible_combinations(UpperCamelCase : int ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(UpperCamelCase ) def lowercase ( UpperCamelCase : int , UpperCamelCase : list[int] , UpperCamelCase : int ): """simple docstring""" def count_of_possible_combinations_with_dp_array( UpperCamelCase : int , UpperCamelCase : list[int] ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] A__ : str =sum( count_of_possible_combinations_with_dp_array(target - item , UpperCamelCase ) for item in array ) A__ : List[str] =answer return answer A__ : List[Any] =[-1] * (target + 1) return count_of_possible_combinations_with_dp_array(UpperCamelCase , UpperCamelCase ) def lowercase ( UpperCamelCase : int , UpperCamelCase : list[int] , UpperCamelCase : int ): """simple docstring""" A__ : str =[0] * (target + 1) A__ : Optional[Any] =1 for i in range(1 , target + 1 ): for j in range(UpperCamelCase ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() __A : Optional[Any] = 3 __A : Optional[Any] = 5 __A : int = [1, 2, 5] print(combination_sum_iv(n, array, target))
656
0
import os import sys import unittest _snake_case : Any = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path _snake_case : Optional[Any] = os.path.join(git_repo_path, "src", "transformers") _snake_case : int = "\n{0} = None\n" _snake_case : Dict = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n" _snake_case : Tuple = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n" class a (unittest.TestCase ): """simple docstring""" def __snake_case ( self : Dict ) -> Optional[int]: __snake_case : str = find_backend(" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")" ) self.assertIsNone(lowerCamelCase ) __snake_case : str = find_backend(" if not is_tokenizers_available():" ) self.assertEqual(lowerCamelCase , "tokenizers" ) __snake_case : List[str] = find_backend(" if not is_tensorflow_text_available():" ) self.assertEqual(lowerCamelCase , "tensorflow_text" ) __snake_case : Any = find_backend(" if not (is_sentencepiece_available() and is_tokenizers_available()):" ) self.assertEqual(lowerCamelCase , "sentencepiece_and_tokenizers" ) __snake_case : List[Any] = find_backend( " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" ) self.assertEqual(lowerCamelCase , "sentencepiece_and_tensorflow_text" ) __snake_case : Union[str, Any] = find_backend( " if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):" ) self.assertEqual(lowerCamelCase , "sentencepiece_and_tokenizers_and_vision" ) def __snake_case ( self : List[Any] ) -> Optional[int]: __snake_case : List[Any] = read_init() # We don't assert on the exact list of keys to allow for 
smooth grow of backend-specific objects self.assertIn("torch" , lowerCamelCase ) self.assertIn("tensorflow_text" , lowerCamelCase ) self.assertIn("sentencepiece_and_tokenizers" , lowerCamelCase ) # Likewise, we can't assert on the exact content of a key self.assertIn("BertModel" , objects["torch"] ) self.assertIn("TFBertModel" , objects["tf"] ) self.assertIn("FlaxBertModel" , objects["flax"] ) self.assertIn("BertModel" , objects["torch"] ) self.assertIn("TFBertTokenizer" , objects["tensorflow_text"] ) self.assertIn("convert_slow_tokenizer" , objects["sentencepiece_and_tokenizers"] ) def __snake_case ( self : Dict ) -> Union[str, Any]: __snake_case : int = create_dummy_object("CONSTANT" , "'torch'" ) self.assertEqual(lowerCamelCase , "\nCONSTANT = None\n" ) __snake_case : Tuple = create_dummy_object("function" , "'torch'" ) self.assertEqual( lowerCamelCase , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" ) __snake_case : Optional[Any] = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n" __snake_case : List[Any] = create_dummy_object("FakeClass" , "'torch'" ) self.assertEqual(lowerCamelCase , lowerCamelCase ) def __snake_case ( self : Optional[Any] ) -> Optional[Any]: __snake_case : Optional[int] = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n" __snake_case : Any = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} ) self.assertEqual(dummy_files["torch"] , lowerCamelCase )
81
"""simple docstring""" import math import tensorflow as tf from packaging import version def lowercase ( UpperCamelCase : Optional[Any] ): """simple docstring""" A__ : List[Any] =tf.convert_to_tensor(UpperCamelCase ) A__ : List[Any] =0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) )) return x * cdf def lowercase ( UpperCamelCase : Optional[int] ): """simple docstring""" A__ : Optional[Any] =tf.convert_to_tensor(UpperCamelCase ) A__ : Tuple =tf.cast(math.pi , x.dtype ) A__ : Dict =tf.cast(0.04_47_15 , x.dtype ) A__ : Optional[int] =0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(UpperCamelCase , 3 )) )) return x * cdf def lowercase ( UpperCamelCase : Optional[int] ): """simple docstring""" A__ : List[str] =tf.convert_to_tensor(UpperCamelCase ) return x * tf.tanh(tf.math.softplus(UpperCamelCase ) ) def lowercase ( UpperCamelCase : List[str] ): """simple docstring""" A__ : Union[str, Any] =tf.convert_to_tensor(UpperCamelCase ) A__ : List[Any] =tf.cast(0.04_47_15 , x.dtype ) A__ : List[Any] =tf.cast(0.79_78_84_56_08 , x.dtype ) return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) )) def lowercase ( UpperCamelCase : List[Any] ): """simple docstring""" A__ : List[str] =tf.convert_to_tensor(UpperCamelCase ) A__ : str =tf.cast(1.7_02 , x.dtype ) return x * tf.math.sigmoid(coeff * x ) def lowercase ( UpperCamelCase : Tuple ): """simple docstring""" return tf.clip_by_value(_gelu(UpperCamelCase ) , -10 , 10 ) def lowercase ( UpperCamelCase : str , UpperCamelCase : Any=-1 ): """simple docstring""" A__ , A__ : Optional[Any] =tf.split(UpperCamelCase , 2 , axis=UpperCamelCase ) return a * tf.math.sigmoid(UpperCamelCase ) if version.parse(tf.version.VERSION) >= version.parse("2.4"): def lowercase ( UpperCamelCase : int ): """simple docstring""" return tf.keras.activations.gelu(UpperCamelCase , approximate=UpperCamelCase ) __A : Optional[Any] = tf.keras.activations.gelu __A : Optional[Any] = approximate_gelu_wrap else: __A : Any = _gelu 
__A : Union[str, Any] = _gelu_new __A : List[str] = { "gelu": gelu, "gelu_10": gelu_aa, "gelu_fast": gelu_fast, "gelu_new": gelu_new, "glu": glu, "mish": mish, "quick_gelu": quick_gelu, "relu": tf.keras.activations.relu, "sigmoid": tf.keras.activations.sigmoid, "silu": tf.keras.activations.swish, "swish": tf.keras.activations.swish, "tanh": tf.keras.activations.tanh, } def lowercase ( UpperCamelCase : List[Any] ): """simple docstring""" if activation_string in ACTaFN: return ACTaFN[activation_string] else: raise KeyError(F'''function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}''' )
656
0
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase = { """configuration_trajectory_transformer""": [ """TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrajectoryTransformerConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase = [ """TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """TrajectoryTransformerModel""", """TrajectoryTransformerPreTrainedModel""", """load_tf_weights_in_trajectory_transformer""", ] if TYPE_CHECKING: from .configuration_trajectory_transformer import ( TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TrajectoryTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trajectory_transformer import ( TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TrajectoryTransformerModel, TrajectoryTransformerPreTrainedModel, load_tf_weights_in_trajectory_transformer, ) else: import sys lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
82
"""simple docstring""" import inspect import unittest from transformers import SegformerConfig, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_MAPPING, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerModel, ) from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import SegformerImageProcessor class __lowerCAmelCase ( _UpperCamelCase): '''simple docstring''' def _UpperCAmelCase ( self : Dict ): A__ : Optional[Any] =self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(UpperCamelCase__ , "hidden_sizes" ) ) self.parent.assertTrue(hasattr(UpperCamelCase__ , "num_attention_heads" ) ) self.parent.assertTrue(hasattr(UpperCamelCase__ , "num_encoder_blocks" ) ) class __lowerCAmelCase : '''simple docstring''' def __init__( self : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any]=13 , UpperCamelCase__ : Tuple=64 , UpperCamelCase__ : Optional[int]=3 , UpperCamelCase__ : Union[str, Any]=4 , UpperCamelCase__ : Dict=[2, 2, 2, 2] , UpperCamelCase__ : Union[str, Any]=[8, 4, 2, 1] , UpperCamelCase__ : Tuple=[16, 32, 64, 128] , UpperCamelCase__ : Optional[int]=[1, 4, 8, 16] , UpperCamelCase__ : Any=[1, 2, 4, 8] , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Dict="gelu" , UpperCamelCase__ : str=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : List[str]=0.02 , UpperCamelCase__ : int=3 , UpperCamelCase__ : Optional[Any]=None , ): A__ : Tuple =parent A__ : List[Any] =batch_size A__ : 
List[Any] =image_size A__ : Union[str, Any] =num_channels A__ : Optional[int] =num_encoder_blocks A__ : Any =sr_ratios A__ : Any =depths A__ : List[Any] =hidden_sizes A__ : List[Any] =downsampling_rates A__ : List[str] =num_attention_heads A__ : int =is_training A__ : List[Any] =use_labels A__ : Any =hidden_act A__ : Dict =hidden_dropout_prob A__ : int =attention_probs_dropout_prob A__ : List[Any] =initializer_range A__ : Tuple =num_labels A__ : List[Any] =scope def _UpperCAmelCase ( self : Optional[int] ): A__ : List[str] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A__ : Any =None if self.use_labels: A__ : Tuple =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) A__ : List[Any] =self.get_config() return config, pixel_values, labels def _UpperCAmelCase ( self : Tuple ): return SegformerConfig( image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int ): A__ : Any =SegformerModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() A__ : Dict =model(UpperCamelCase__ ) A__ : Optional[int] =self.image_size // (self.downsampling_rates[-1] * 2) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) ) def _UpperCAmelCase ( self : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] ): A__ : str =self.num_labels A__ : Optional[Any] =SegformerForSemanticSegmentation(UpperCamelCase__ ) model.to(UpperCamelCase__ ) 
model.eval() A__ : Optional[Any] =model(UpperCamelCase__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) A__ : List[Any] =model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) self.parent.assertGreater(result.loss , 0.0 ) def _UpperCAmelCase ( self : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str ): A__ : Tuple =1 A__ : Tuple =SegformerForSemanticSegmentation(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() A__ : List[str] =torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(UpperCamelCase__ ) A__ : Dict =model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertGreater(result.loss , 0.0 ) def _UpperCAmelCase ( self : str ): A__ : Union[str, Any] =self.prepare_config_and_inputs() A__ , A__ , A__ : Tuple =config_and_inputs A__ : Tuple ={"pixel_values": pixel_values} return config, inputs_dict @require_torch class __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase): '''simple docstring''' __magic_name__ : Dict = ( ( SegformerModel, SegformerForSemanticSegmentation, SegformerForImageClassification, ) if is_torch_available() else () ) __magic_name__ : Optional[int] = ( { """feature-extraction""": SegformerModel, """image-classification""": SegformerForImageClassification, """image-segmentation""": SegformerForSemanticSegmentation, } if is_torch_available() else {} ) __magic_name__ : Dict = True __magic_name__ : List[str] = False __magic_name__ : Optional[Any] = False __magic_name__ : str = False def _UpperCAmelCase ( self : Union[str, Any] ): A__ : Union[str, Any] =SegformerModelTester(self ) A__ : Tuple =SegformerConfigTester(self , config_class=UpperCamelCase__ ) def _UpperCAmelCase ( self : str ): self.config_tester.run_common_tests() def 
_UpperCAmelCase ( self : Dict ): A__ : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def _UpperCAmelCase ( self : Tuple ): A__ : int =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_binary_image_segmentation(*UpperCamelCase__ ) def _UpperCAmelCase ( self : Union[str, Any] ): A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_segmentation(*UpperCamelCase__ ) @unittest.skip("SegFormer does not use inputs_embeds" ) def _UpperCAmelCase ( self : Dict ): pass @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods" ) def _UpperCAmelCase ( self : Tuple ): pass def _UpperCAmelCase ( self : List[str] ): A__ , A__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ : int =model_class(UpperCamelCase__ ) A__ : Optional[int] =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ : Optional[int] =[*signature.parameters.keys()] A__ : List[str] =["pixel_values"] self.assertListEqual(arg_names[:1] , UpperCamelCase__ ) def _UpperCAmelCase ( self : str ): A__ , A__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common() A__ : Union[str, Any] =True for model_class in self.all_model_classes: A__ : Optional[Any] =True A__ : Union[str, Any] =False A__ : str =True A__ : Optional[int] =model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() with torch.no_grad(): A__ : str =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) A__ : Any =outputs.attentions A__ : List[str] =sum(self.model_tester.depths ) self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ ) # check that output_attentions also work using config del inputs_dict["output_attentions"] A__ : Dict =True A__ : str =model_class(UpperCamelCase__ ) 
model.to(UpperCamelCase__ ) model.eval() with torch.no_grad(): A__ : Any =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) A__ : Union[str, Any] =outputs.attentions self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ ) # verify the first attentions (first block, first layer) A__ : List[Any] =(self.model_tester.image_size // 4) ** 2 A__ : Tuple =(self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) # verify the last attentions (last block, last layer) A__ : Tuple =(self.model_tester.image_size // 32) ** 2 A__ : Optional[Any] =(self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2 self.assertListEqual( list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , ) A__ : int =len(UpperCamelCase__ ) # Check attention is always last and order is fine A__ : Optional[Any] =True A__ : Any =True A__ : Union[str, Any] =model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() with torch.no_grad(): A__ : Optional[Any] =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) self.assertEqual(out_len + 1 , len(UpperCamelCase__ ) ) A__ : Optional[Any] =outputs.attentions self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ ) # verify the first attentions (first block, first layer) A__ : Union[str, Any] =(self.model_tester.image_size // 4) ** 2 A__ : Tuple =(self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) def _UpperCAmelCase ( self : List[Any] ): def check_hidden_states_output(UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple ): A__ : Optional[Any] 
=model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() with torch.no_grad(): A__ : List[Any] =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) A__ : Optional[Any] =outputs.hidden_states A__ : int =self.model_tester.num_encoder_blocks self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.hidden_sizes[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) A__ , A__ : List[str] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ : Optional[Any] =True check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A__ : str =True check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def _UpperCAmelCase ( self : Optional[int] ): if not self.model_tester.is_training: return A__ , A__ : int =self.model_tester.prepare_config_and_inputs_for_common() A__ : List[Any] =True for model_class in self.all_model_classes: if model_class in get_values(UpperCamelCase__ ): continue A__ : List[Any] =model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.train() A__ : int =self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) A__ : Union[str, Any] =model(**UpperCamelCase__ ).loss loss.backward() @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." 
) def _UpperCAmelCase ( self : Tuple ): pass @slow def _UpperCAmelCase ( self : Tuple ): for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ : Tuple =SegformerModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def lowercase ( ): """simple docstring""" A__ : List[Any] =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch class __lowerCAmelCase ( unittest.TestCase): '''simple docstring''' @slow def _UpperCAmelCase ( self : Tuple ): # only resize + normalize A__ : List[Any] =SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=UpperCamelCase__ , align=UpperCamelCase__ , do_random_crop=UpperCamelCase__ ) A__ : Union[str, Any] =SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to( UpperCamelCase__ ) A__ : Union[str, Any] =prepare_img() A__ : Union[str, Any] =image_processor(images=UpperCamelCase__ , return_tensors="pt" ) A__ : int =encoded_inputs.pixel_values.to(UpperCamelCase__ ) with torch.no_grad(): A__ : int =model(UpperCamelCase__ ) A__ : Dict =torch.Size((1, model.config.num_labels, 128, 128) ) self.assertEqual(outputs.logits.shape , UpperCamelCase__ ) A__ : Optional[int] =torch.tensor( [ [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]], [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]], [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]], ] ).to(UpperCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1E-4 ) ) @slow def _UpperCAmelCase ( self : Union[str, Any] ): # only resize + normalize A__ : Dict =SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=UpperCamelCase__ , align=UpperCamelCase__ , do_random_crop=UpperCamelCase__ ) A__ : int =SegformerForSemanticSegmentation.from_pretrained( "nvidia/segformer-b1-finetuned-cityscapes-1024-1024" 
).to(UpperCamelCase__ ) A__ : Tuple =prepare_img() A__ : str =image_processor(images=UpperCamelCase__ , return_tensors="pt" ) A__ : Optional[int] =encoded_inputs.pixel_values.to(UpperCamelCase__ ) with torch.no_grad(): A__ : int =model(UpperCamelCase__ ) A__ : List[str] =torch.Size((1, model.config.num_labels, 128, 128) ) self.assertEqual(outputs.logits.shape , UpperCamelCase__ ) A__ : List[Any] =torch.tensor( [ [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]], [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]], [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]], ] ).to(UpperCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1E-1 ) ) @slow def _UpperCAmelCase ( self : int ): # only resize + normalize A__ : Optional[Any] =SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=UpperCamelCase__ , align=UpperCamelCase__ , do_random_crop=UpperCamelCase__ ) A__ : List[Any] =SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to( UpperCamelCase__ ) A__ : str =prepare_img() A__ : Dict =image_processor(images=UpperCamelCase__ , return_tensors="pt" ) A__ : Any =encoded_inputs.pixel_values.to(UpperCamelCase__ ) with torch.no_grad(): A__ : Dict =model(UpperCamelCase__ ) A__ : Any =outputs.logits.detach().cpu() A__ : Union[str, Any] =image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ , target_sizes=[(500, 300)] ) A__ : List[str] =torch.Size((500, 300) ) self.assertEqual(segmentation[0].shape , UpperCamelCase__ ) A__ : int =image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ ) A__ : Tuple =torch.Size((128, 128) ) self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
656
0
"""simple docstring""" import math import unittest def snake_case_ ( A_ : int ): '''simple docstring''' assert isinstance(A_, A_ ) and ( number >= 0 ), "'number' must been an int and positive" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5, int(math.sqrt(A_ ) + 1 ), 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True class __snake_case ( unittest.TestCase): def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" self.assertTrue(is_prime(2 ) ) self.assertTrue(is_prime(3 ) ) self.assertTrue(is_prime(5 ) ) self.assertTrue(is_prime(7 ) ) self.assertTrue(is_prime(1_1 ) ) self.assertTrue(is_prime(1_3 ) ) self.assertTrue(is_prime(1_7 ) ) self.assertTrue(is_prime(1_9 ) ) self.assertTrue(is_prime(2_3 ) ) self.assertTrue(is_prime(2_9 ) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" with self.assertRaises(__lowerCAmelCase ): is_prime(-1_9 ) self.assertFalse( is_prime(0 ) , '''Zero doesn\'t have any positive factors, primes must have exactly two.''' , ) self.assertFalse( is_prime(1 ) , '''One only has 1 positive factor, primes must have exactly two.''' , ) self.assertFalse(is_prime(2 * 2 ) ) self.assertFalse(is_prime(2 * 3 ) ) self.assertFalse(is_prime(3 * 3 ) ) self.assertFalse(is_prime(3 * 5 ) ) self.assertFalse(is_prime(3 * 5 * 7 ) ) if __name__ == "__main__": unittest.main()
83
"""simple docstring""" import unittest import numpy as np from transformers import RobertaPreLayerNormConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, ) class __lowerCAmelCase ( unittest.TestCase): '''simple docstring''' def __init__( self : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any]=13 , UpperCamelCase__ : Optional[int]=7 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : List[str]=99 , UpperCamelCase__ : Optional[Any]=32 , UpperCamelCase__ : Any=5 , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : Union[str, Any]=37 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Optional[Any]=512 , UpperCamelCase__ : Union[str, Any]=16 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : List[str]=0.02 , UpperCamelCase__ : List[Any]=4 , ): A__ : str =parent A__ : List[str] =batch_size A__ : Any =seq_length A__ : List[str] =is_training A__ : List[Any] =use_attention_mask A__ : List[Any] =use_token_type_ids A__ : Dict =use_labels A__ : List[Any] =vocab_size A__ : Optional[int] =hidden_size A__ : Optional[Any] =num_hidden_layers A__ : str =num_attention_heads A__ : int =intermediate_size A__ : Tuple =hidden_act A__ : Tuple =hidden_dropout_prob A__ : Dict =attention_probs_dropout_prob A__ : Any 
=max_position_embeddings A__ : Any =type_vocab_size A__ : Union[str, Any] =type_sequence_label_size A__ : Optional[Any] =initializer_range A__ : int =num_choices def _UpperCAmelCase ( self : Tuple ): A__ : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ : List[str] =None if self.use_attention_mask: A__ : Optional[int] =random_attention_mask([self.batch_size, self.seq_length] ) A__ : str =None if self.use_token_type_ids: A__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A__ : Any =RobertaPreLayerNormConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _UpperCAmelCase ( self : Tuple ): A__ : Dict =self.prepare_config_and_inputs() A__ , A__ , A__ , A__ : str =config_and_inputs A__ : Optional[Any] ={"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict def _UpperCAmelCase ( self : int ): A__ : str =self.prepare_config_and_inputs() A__ , A__ , A__ , A__ : Union[str, Any] =config_and_inputs A__ : Union[str, Any] =True A__ : List[Any] =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) A__ : Tuple =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax # Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with 
ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40 class __lowerCAmelCase ( _UpperCamelCase , unittest.TestCase): '''simple docstring''' __magic_name__ : Union[str, Any] = True __magic_name__ : Dict = ( ( FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, ) if is_flax_available() else () ) def _UpperCAmelCase ( self : Optional[int] ): A__ : Optional[int] =FlaxRobertaPreLayerNormModelTester(self ) @slow def _UpperCAmelCase ( self : List[Any] ): for model_class_name in self.all_model_classes: A__ : Tuple =model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=UpperCamelCase__ ) A__ : Union[str, Any] =model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCamelCase__ ) @require_flax class __lowerCAmelCase ( unittest.TestCase): '''simple docstring''' @slow def _UpperCAmelCase ( self : Tuple ): A__ : Any =FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=UpperCamelCase__ ) A__ : Tuple =np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] , dtype=jnp.intaa ) A__ : str =model(UpperCamelCase__ )[0] A__ : List[Any] =[1, 11, 50265] self.assertEqual(list(output.shape ) , UpperCamelCase__ ) # compare the actual values for a slice. 
A__ : Any =np.array( [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) ) @slow def _UpperCAmelCase ( self : List[Any] ): A__ : Union[str, Any] =FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=UpperCamelCase__ ) A__ : List[Any] =np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] , dtype=jnp.intaa ) A__ : Dict =model(UpperCamelCase__ )[0] # compare the actual values for a slice. A__ : Optional[Any] =np.array( [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )
656
0
from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers UpperCAmelCase = [ '''python''', '''tqdm''', '''regex''', '''requests''', '''packaging''', '''filelock''', '''numpy''', '''tokenizers''', '''huggingface-hub''', '''safetensors''', '''accelerate''', '''pyyaml''', ] for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed elif pkg == "accelerate": # must be loaded here, or else tqdm check may fail from .utils import is_accelerate_available # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of # Transformers with PyTorch if not is_accelerate_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""") def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ): require_version(deps[pkg] , __SCREAMING_SNAKE_CASE )
84
"""simple docstring""" import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() __A : List[Any] = logging.get_logger(__name__) __A : Any = [ ("bert.bert", "visual_bert"), ("bert.cls", "cls"), ("bert.classifier", "cls"), ("token_type_embeddings_visual", "visual_token_type_embeddings"), ("position_embeddings_visual", "visual_position_embeddings"), ("projection", "visual_projection"), ] __A : Optional[int] = [ "nlvr2_coco_pre_trained.th", "nlvr2_fine_tuned.th", "nlvr2_pre_trained.th", "vcr_coco_pre_train.th", "vcr_fine_tune.th", "vcr_pre_train.th", "vqa_coco_pre_trained.th", "vqa_fine_tuned.th", "vqa_pre_trained.th", ] def lowercase ( UpperCamelCase : Tuple ): """simple docstring""" A__ : Union[str, Any] =torch.load(UpperCamelCase , map_location="cpu" ) return sd def lowercase ( UpperCamelCase : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : int=rename_keys_prefix ): """simple docstring""" A__ : List[str] =OrderedDict() A__ : str =torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue A__ : Optional[Any] =key for name_pair in rename_keys_prefix: A__ : int =new_key.replace(name_pair[0] , name_pair[1] ) A__ : Dict =d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately A__ : Optional[int] =new_d["cls.predictions.bias"] return new_d @torch.no_grad() def lowercase ( UpperCamelCase : Dict , UpperCamelCase : List[str] ): """simple docstring""" assert ( checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS ), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.''' # Get Config if "pre" in 
checkpoint_path: A__ : Any ="pretraining" if "vcr" in checkpoint_path: A__ : Union[str, Any] ={"visual_embedding_dim": 512} elif "vqa_advanced" in checkpoint_path: A__ : Optional[Any] ={"visual_embedding_dim": 2048} elif "vqa" in checkpoint_path: A__ : Optional[int] ={"visual_embedding_dim": 2048} elif "nlvr" in checkpoint_path: A__ : List[str] ={"visual_embedding_dim": 1024} else: raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''' ) else: if "vcr" in checkpoint_path: A__ : Optional[int] ={"visual_embedding_dim": 512} A__ : List[str] ="multichoice" elif "vqa_advanced" in checkpoint_path: A__ : Any ={"visual_embedding_dim": 2048} A__ : str ="vqa_advanced" elif "vqa" in checkpoint_path: A__ : Optional[int] ={"visual_embedding_dim": 2048, "num_labels": 3129} A__ : str ="vqa" elif "nlvr" in checkpoint_path: A__ : str ={ "visual_embedding_dim": 1024, "num_labels": 2, } A__ : Dict ="nlvr" A__ : Union[str, Any] =VisualBertConfig(**UpperCamelCase ) # Load State Dict A__ : int =load_state_dict(UpperCamelCase ) A__ : Tuple =get_new_dict(UpperCamelCase , UpperCamelCase ) if model_type == "pretraining": A__ : str =VisualBertForPreTraining(UpperCamelCase ) elif model_type == "vqa": A__ : Optional[int] =VisualBertForQuestionAnswering(UpperCamelCase ) elif model_type == "nlvr": A__ : Union[str, Any] =VisualBertForVisualReasoning(UpperCamelCase ) elif model_type == "multichoice": A__ : Union[str, Any] =VisualBertForMultipleChoice(UpperCamelCase ) model.load_state_dict(UpperCamelCase ) # Save Checkpoints Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase ) model.save_pretrained(UpperCamelCase ) if __name__ == "__main__": __A : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.") parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.") __A : str = parser.parse_args() 
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
656
0
from ....utils import logging SCREAMING_SNAKE_CASE__ : List[Any] = logging.get_logger(__name__) class snake_case ( UpperCamelCase_ ): def __init__( self : Tuple , a_ : str , a_ : str=None , a_ : int=2048 )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = config.__dict__ SCREAMING_SNAKE_CASE__ : List[str] = modal_hidden_size if num_labels: SCREAMING_SNAKE_CASE__ : Optional[Any] = num_labels
85
"""simple docstring""" __A : Union[str, Any] = {str(digit): digit**5 for digit in range(10)} def lowercase ( UpperCamelCase : int ): """simple docstring""" return sum(DIGITS_FIFTH_POWER[digit] for digit in str(UpperCamelCase ) ) def lowercase ( ): """simple docstring""" return sum( number for number in range(1000 , 1000000 ) if number == digits_fifth_powers_sum(UpperCamelCase ) ) if __name__ == "__main__": print(solution())
656
0
def pancake_sort(arr):
    """Sort ``arr`` in ascending order with pancake sort and return the result.

    Each pass finds the maximum of the unsorted prefix, flips it to the front,
    then flips the whole prefix so the maximum lands in its final slot.

    Fix: the original defined this function as ``__snake_case`` while the
    ``__main__`` block called ``pancake_sort`` (NameError); names are restored.
    """
    cur = len(arr)
    while cur > 1:
        # Index of the largest element still in the unsorted prefix.
        mi = arr.index(max(arr[0:cur]))
        # Flip arr[0..mi] so the maximum moves to the front.
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Flip arr[0..cur-1] so the maximum moves to position cur-1.
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    # Fix: the original bound the input to ``__a`` but read ``user_input``.
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(pancake_sort(unsorted))
86
"""simple docstring""" import collections.abc from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_poolformer import PoolFormerConfig __A : Optional[Any] = logging.get_logger(__name__) # General docstring __A : str = "PoolFormerConfig" # Base docstring __A : Optional[Any] = "sail/poolformer_s12" __A : List[Any] = [1, 512, 7, 7] # Image classification docstring __A : List[str] = "sail/poolformer_s12" __A : Tuple = "tabby, tabby cat" __A : Tuple = [ "sail/poolformer_s12", # See all PoolFormer models at https://huggingface.co/models?filter=poolformer ] def lowercase ( UpperCamelCase : Any , UpperCamelCase : float = 0.0 , UpperCamelCase : bool = False ): """simple docstring""" if drop_prob == 0.0 or not training: return input A__ : Tuple =1 - drop_prob A__ : List[str] =(input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets A__ : Any =keep_prob + torch.rand(UpperCamelCase , dtype=input.dtype , device=input.device ) random_tensor.floor_() # binarize A__ : Optional[int] =input.div(UpperCamelCase ) * random_tensor return output class __lowerCAmelCase ( nn.Module): '''simple docstring''' def __init__( self : Optional[int] , UpperCamelCase__ : Optional[float] = None ): super().__init__() A__ : Optional[int] =drop_prob def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : torch.Tensor ): return drop_path(UpperCamelCase__ , self.drop_prob , self.training ) def _UpperCAmelCase ( self : List[str] ): return "p={}".format(self.drop_prob ) class __lowerCAmelCase ( nn.Module): '''simple docstring''' def 
__init__( self : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int=None ): super().__init__() A__ : Optional[int] =patch_size if isinstance(UpperCamelCase__ , collections.abc.Iterable ) else (patch_size, patch_size) A__ : Optional[int] =stride if isinstance(UpperCamelCase__ , collections.abc.Iterable ) else (stride, stride) A__ : int =padding if isinstance(UpperCamelCase__ , collections.abc.Iterable ) else (padding, padding) A__ : Any =nn.Convad(UpperCamelCase__ , UpperCamelCase__ , kernel_size=UpperCamelCase__ , stride=UpperCamelCase__ , padding=UpperCamelCase__ ) A__ : Any =norm_layer(UpperCamelCase__ ) if norm_layer else nn.Identity() def _UpperCAmelCase ( self : Tuple , UpperCamelCase__ : str ): A__ : List[str] =self.projection(UpperCamelCase__ ) A__ : Any =self.norm(UpperCamelCase__ ) return embeddings class __lowerCAmelCase ( nn.GroupNorm): '''simple docstring''' def __init__( self : Tuple , UpperCamelCase__ : Dict , **UpperCamelCase__ : Union[str, Any] ): super().__init__(1 , UpperCamelCase__ , **UpperCamelCase__ ) class __lowerCAmelCase ( nn.Module): '''simple docstring''' def __init__( self : Tuple , UpperCamelCase__ : Optional[int] ): super().__init__() A__ : Any =nn.AvgPoolad(UpperCamelCase__ , stride=1 , padding=pool_size // 2 , count_include_pad=UpperCamelCase__ ) def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : List[str] ): return self.pool(UpperCamelCase__ ) - hidden_states class __lowerCAmelCase ( nn.Module): '''simple docstring''' def __init__( self : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] ): super().__init__() A__ : List[Any] =nn.Convad(UpperCamelCase__ , UpperCamelCase__ , 1 ) A__ : Union[str, Any] =nn.Convad(UpperCamelCase__ , UpperCamelCase__ , 1 ) A__ : Dict =PoolFormerDropPath(UpperCamelCase__ ) if 
isinstance(config.hidden_act , UpperCamelCase__ ): A__ : Tuple =ACTaFN[config.hidden_act] else: A__ : Optional[Any] =config.hidden_act def _UpperCAmelCase ( self : Tuple , UpperCamelCase__ : Dict ): A__ : Optional[Any] =self.conva(UpperCamelCase__ ) A__ : List[str] =self.act_fn(UpperCamelCase__ ) A__ : List[str] =self.drop(UpperCamelCase__ ) A__ : Optional[int] =self.conva(UpperCamelCase__ ) A__ : Optional[Any] =self.drop(UpperCamelCase__ ) return hidden_states class __lowerCAmelCase ( nn.Module): '''simple docstring''' def __init__( self : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Any ): super().__init__() A__ : Optional[int] =PoolFormerPooling(UpperCamelCase__ ) A__ : List[str] =PoolFormerOutput(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) A__ : int =PoolFormerGroupNorm(UpperCamelCase__ ) A__ : int =PoolFormerGroupNorm(UpperCamelCase__ ) # Useful for training neural nets A__ : Tuple =PoolFormerDropPath(UpperCamelCase__ ) if drop_path > 0.0 else nn.Identity() A__ : Optional[Any] =config.use_layer_scale if config.use_layer_scale: A__ : List[str] =nn.Parameter( config.layer_scale_init_value * torch.ones((UpperCamelCase__) ) , requires_grad=UpperCamelCase__ ) A__ : List[Any] =nn.Parameter( config.layer_scale_init_value * torch.ones((UpperCamelCase__) ) , requires_grad=UpperCamelCase__ ) def _UpperCAmelCase ( self : Any , UpperCamelCase__ : Optional[int] ): if self.use_layer_scale: A__ : Optional[int] =self.pooling(self.before_norm(UpperCamelCase__ ) ) A__ : Union[str, Any] =self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output # First residual connection A__ : Union[str, Any] =hidden_states + self.drop_path(UpperCamelCase__ ) A__ : Tuple =() A__ : List[str] =self.output(self.after_norm(UpperCamelCase__ ) ) A__ : Optional[Any] =self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * 
layer_output # Second residual connection A__ : str =hidden_states + self.drop_path(UpperCamelCase__ ) A__ : List[Any] =(output,) + outputs return outputs else: A__ : Tuple =self.drop_path(self.pooling(self.before_norm(UpperCamelCase__ ) ) ) # First residual connection A__ : Optional[Any] =pooling_output + hidden_states A__ : Tuple =() # Second residual connection inside the PoolFormerOutput block A__ : List[str] =self.drop_path(self.output(self.after_norm(UpperCamelCase__ ) ) ) A__ : Any =hidden_states + layer_output A__ : Tuple =(output,) + outputs return outputs class __lowerCAmelCase ( nn.Module): '''simple docstring''' def __init__( self : Dict , UpperCamelCase__ : List[str] ): super().__init__() A__ : Tuple =config # stochastic depth decay rule A__ : Dict =[x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )] # patch embeddings A__ : Tuple =[] for i in range(config.num_encoder_blocks ): embeddings.append( PoolFormerEmbeddings( patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) ) A__ : List[str] =nn.ModuleList(UpperCamelCase__ ) # Transformer blocks A__ : Union[str, Any] =[] A__ : Any =0 for i in range(config.num_encoder_blocks ): # each block consists of layers A__ : Union[str, Any] =[] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i] ): layers.append( PoolFormerLayer( UpperCamelCase__ , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) ) blocks.append(nn.ModuleList(UpperCamelCase__ ) ) A__ : str =nn.ModuleList(UpperCamelCase__ ) def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : Optional[int]=True ): A__ : Union[str, Any] =() if 
output_hidden_states else None A__ : Dict =pixel_values for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ): A__ , A__ : List[Any] =layers # Get patch embeddings from hidden_states A__ : Any =embedding_layer(UpperCamelCase__ ) # Send the embeddings through the blocks for _, blk in enumerate(UpperCamelCase__ ): A__ : List[str] =blk(UpperCamelCase__ ) A__ : Tuple =layer_outputs[0] if output_hidden_states: A__ : List[Any] =all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=UpperCamelCase__ , hidden_states=UpperCamelCase__ ) class __lowerCAmelCase ( _UpperCamelCase): '''simple docstring''' __magic_name__ : List[str] = PoolFormerConfig __magic_name__ : int = """poolformer""" __magic_name__ : Any = """pixel_values""" __magic_name__ : Any = True def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : str ): if isinstance(UpperCamelCase__ , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(UpperCamelCase__ , nn.LayerNorm ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) def _UpperCAmelCase ( self : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any]=False ): if isinstance(UpperCamelCase__ , UpperCamelCase__ ): A__ : Optional[Any] =value __A : Optional[int] = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. 
Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n" __A : Dict = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n" @add_start_docstrings( """The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.""" , _UpperCamelCase , ) class __lowerCAmelCase ( _UpperCamelCase): '''simple docstring''' def __init__( self : List[str] , UpperCamelCase__ : Dict ): super().__init__(UpperCamelCase__ ) A__ : List[Any] =config A__ : Optional[Any] =PoolFormerEncoder(UpperCamelCase__ ) # Initialize weights and apply final processing self.post_init() def _UpperCAmelCase ( self : Tuple ): return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(UpperCamelCase__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCamelCase__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def _UpperCAmelCase ( self : str , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[bool] = None , ): A__ : int =( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) A__ : Optional[int] =return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values" ) A__ : List[Any] =self.encoder( UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , return_dict=UpperCamelCase__ , ) A__ : int =encoder_outputs[0] if not return_dict: return (sequence_output, None) + encoder_outputs[1:] return BaseModelOutputWithNoAttention( last_hidden_state=UpperCamelCase__ , hidden_states=encoder_outputs.hidden_states , ) class __lowerCAmelCase ( nn.Module): '''simple docstring''' def 
__init__( self : Dict , UpperCamelCase__ : Optional[Any] ): super().__init__() A__ : List[str] =nn.Linear(config.hidden_size , config.hidden_size ) def _UpperCAmelCase ( self : Optional[Any] , UpperCamelCase__ : List[Any] ): A__ : int =self.dense(UpperCamelCase__ ) return output @add_start_docstrings( """ PoolFormer Model transformer with an image classification head on top """ , _UpperCamelCase , ) class __lowerCAmelCase ( _UpperCamelCase): '''simple docstring''' def __init__( self : Optional[Any] , UpperCamelCase__ : str ): super().__init__(UpperCamelCase__ ) A__ : List[str] =config.num_labels A__ : Optional[int] =PoolFormerModel(UpperCamelCase__ ) # Final norm A__ : Dict =PoolFormerGroupNorm(config.hidden_sizes[-1] ) # Classifier head A__ : Dict =( nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UpperCamelCase__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCamelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.LongTensor] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[bool] = None , ): A__ : Tuple =return_dict if return_dict is not None else self.config.use_return_dict A__ : List[str] =self.poolformer( UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , return_dict=UpperCamelCase__ , ) A__ : str =outputs[0] A__ : List[Any] =self.classifier(self.norm(UpperCamelCase__ ).mean([-2, -1] ) ) A__ : Optional[Any] =None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: A__ : int ="regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): A__ : Tuple ="single_label_classification" else: A__ : 
Optional[int] ="multi_label_classification" if self.config.problem_type == "regression": A__ : Dict =MSELoss() if self.num_labels == 1: A__ : Optional[Any] =loss_fct(logits.squeeze() , labels.squeeze() ) else: A__ : List[str] =loss_fct(UpperCamelCase__ , UpperCamelCase__ ) elif self.config.problem_type == "single_label_classification": A__ : Tuple =CrossEntropyLoss() A__ : int =loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": A__ : List[Any] =BCEWithLogitsLoss() A__ : str =loss_fct(UpperCamelCase__ , UpperCamelCase__ ) if not return_dict: A__ : Optional[int] =(logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=UpperCamelCase__ , logits=UpperCamelCase__ , hidden_states=outputs.hidden_states )
656
0
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Fix: the obfuscated original bound both the logger and this map to the same
# name, so the map shadowed the logger; restore distinct names.
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
    "distilbert-base-uncased-distilled-squad": (
        "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
    "distilbert-base-cased-distilled-squad": (
        "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
    "distilbert-base-multilingual-cased": (
        "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
    ),
    "distilbert-base-uncased-finetuned-sst-2-english": (
        "https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
    ),
}


class DistilBertConfig(PretrainedConfig):
    """Configuration class for a DistilBERT model.

    Fix: the obfuscated original gave every ``__init__`` parameter the same
    name (a SyntaxError), inherited from an undefined base, and assigned to
    throwaway locals instead of instance attributes; the canonical structure
    is restored. ``attribute_map`` exposes the standard HF attribute aliases.
    """

    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30_522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    """ONNX export configuration for DistilBERT (no token_type_ids input)."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
87
"""simple docstring""" import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase): '''simple docstring''' __magic_name__ : int = IFInpaintingSuperResolutionPipeline __magic_name__ : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""} __magic_name__ : str = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""}) __magic_name__ : Optional[Any] = PipelineTesterMixin.required_optional_params - {"""latents"""} def _UpperCAmelCase ( self : Union[str, Any] ): return self._get_superresolution_dummy_components() def _UpperCAmelCase ( self : Optional[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int]=0 ): if str(UpperCamelCase__ ).startswith("mps" ): A__ : Any =torch.manual_seed(UpperCamelCase__ ) else: A__ : Dict =torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ ) A__ : Tuple =floats_tensor((1, 3, 16, 16) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) A__ : Optional[int] =floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) A__ : Any =floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) A__ : List[str] ={ "prompt": "A painting of a squirrel eating a burger", "image": image, "original_image": original_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , 
reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def _UpperCAmelCase ( self : Dict ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def _UpperCAmelCase ( self : int ): self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" ) def _UpperCAmelCase ( self : Tuple ): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def _UpperCAmelCase ( self : str ): self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def _UpperCAmelCase ( self : Dict ): self._test_save_load_local() def _UpperCAmelCase ( self : Optional[int] ): self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
656
0
"""simple docstring""" def _snake_case ( __snake_case : int ): """simple docstring""" if divisor % 5 == 0 or divisor % 2 == 0: return 0 _lowerCamelCase : Union[str, Any] = 1 _lowerCamelCase : Optional[int] = 1 while repunit: _lowerCamelCase : Optional[int] = (10 * repunit + 1) % divisor repunit_index += 1 return repunit_index def _snake_case ( __snake_case : int = 1000000 ): """simple docstring""" _lowerCamelCase : Dict = limit - 1 if divisor % 2 == 0: divisor += 1 while least_divisible_repunit(__snake_case ) <= limit: divisor += 2 return divisor if __name__ == "__main__": print(f'''{solution() = }''')
88
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __A : Any = { "configuration_efficientformer": [ "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "EfficientFormerConfig", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Union[str, Any] = ["EfficientFormerImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Optional[int] = [ "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "EfficientFormerForImageClassification", "EfficientFormerForImageClassificationWithTeacher", "EfficientFormerModel", "EfficientFormerPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Optional[int] = [ "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFEfficientFormerForImageClassification", "TFEfficientFormerForImageClassificationWithTeacher", "TFEfficientFormerModel", "TFEfficientFormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientformer import EfficientFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientformer import ( EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientFormerForImageClassification, EfficientFormerForImageClassificationWithTeacher, EfficientFormerModel, EfficientFormerPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass 
else: from .modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, TFEfficientFormerPreTrainedModel, ) else: import sys __A : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
656
0
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-50-one-to-many-mmt": (
            "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-50-one-to-many-mmt": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
# fmt: on


class _lowerCamelCase(PreTrainedTokenizer):
    """MBart-50 tokenizer backed by SentencePiece.

    Sequences are built as ``[src_lang_code] X [eos]`` on the source side and
    ``[tgt_lang_code] X [eos]`` on the target side, matching the fairseq
    mBART-50 convention.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    # Special-token ids wrapped around every encoded sequence; reset whenever
    # the active language changes.
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        src_lang=None,
        tgt_lang=None,
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Make sure every fairseq language code is registered as a special token.
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 tokens.
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq
        # vocab and position 3 in the spm vocab.
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self) -> Dict:
        # The SentencePiece processor is not picklable; drop it and reload in
        # __setstate__ from the stored vocab file.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an index (int) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Convert a sequence of tokens (strings for sub-words) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # vocab file no longer on disk: serialize the in-memory model instead
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs: ``[lang_code] X [eos]`` (pairs are concatenated)."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang, tgt_lang, **extra_kwargs):
        """Used by translation pipeline, to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset special tokens to the source lang: prefix=[src_lang_code], suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset special tokens to the target lang: prefix=[tgt_lang_code], suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
89
"""simple docstring""" import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def lowercase ( UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any]=10 ): """simple docstring""" A__ : Tuple =[] for _ in range(UpperCamelCase ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def lowercase ( UpperCamelCase : List[str] , UpperCamelCase : Union[str, Any]=10 ): """simple docstring""" A__ : Dict =[] for step in range(UpperCamelCase ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: A__ : List[Any] =os.path.join(UpperCamelCase , "schedule.bin" ) torch.save(scheduler.state_dict() , UpperCamelCase ) A__ : Dict =torch.load(UpperCamelCase ) scheduler.load_state_dict(UpperCamelCase ) return lrs @require_torch class __lowerCAmelCase ( unittest.TestCase): '''simple docstring''' def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int ): self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) ) for a, b in zip(UpperCamelCase__ , UpperCamelCase__ ): self.assertAlmostEqual(UpperCamelCase__ , UpperCamelCase__ , delta=UpperCamelCase__ ) def _UpperCAmelCase ( self : Tuple ): A__ : Any =torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase__ ) A__ : Optional[Any] =torch.tensor([0.4, 0.2, -0.5] ) A__ : Any =nn.MSELoss() # No warmup, constant schedule, no gradient clipping A__ : List[str] =AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 ) for _ in range(100 ): A__ : 
Optional[int] =criterion(UpperCamelCase__ , UpperCamelCase__ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) def _UpperCAmelCase ( self : Dict ): A__ : Optional[int] =torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase__ ) A__ : Dict =torch.tensor([0.4, 0.2, -0.5] ) A__ : Optional[int] =nn.MSELoss() # No warmup, constant schedule, no gradient clipping A__ : int =Adafactor( params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=UpperCamelCase__ , weight_decay=0.0 , relative_step=UpperCamelCase__ , scale_parameter=UpperCamelCase__ , warmup_init=UpperCamelCase__ , ) for _ in range(1000 ): A__ : List[Any] =criterion(UpperCamelCase__ , UpperCamelCase__ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) @require_torch class __lowerCAmelCase ( unittest.TestCase): '''simple docstring''' __magic_name__ : Optional[int] = nn.Linear(50 , 50) if is_torch_available() else None __magic_name__ : Any = AdamW(m.parameters() , lr=10.0) if is_torch_available() else None __magic_name__ : Union[str, Any] = 10 def _UpperCAmelCase ( self : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int=None ): self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) ) for a, b in zip(UpperCamelCase__ , UpperCamelCase__ ): self.assertAlmostEqual(UpperCamelCase__ , UpperCamelCase__ , delta=UpperCamelCase__ , msg=UpperCamelCase__ ) def _UpperCAmelCase ( self : Optional[Any] ): A__ : Union[str, Any] ={"num_warmup_steps": 2, "num_training_steps": 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) A__ : Union[str, Any] ={ 
get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {"num_warmup_steps": 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, "num_cycles": 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, "power": 2.0, "lr_end": 1E-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {"num_warmup_steps": 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): A__ , A__ : Any =data A__ : Union[str, Any] =scheduler_func(self.optimizer , **UpperCamelCase__ ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) A__ : int =unwrap_schedule(UpperCamelCase__ , self.num_steps ) self.assertListAlmostEqual( UpperCamelCase__ , UpperCamelCase__ , tol=1E-2 , msg=F'''failed for {scheduler_func} in normal scheduler''' , ) A__ : List[str] =scheduler_func(self.optimizer , **UpperCamelCase__ ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(UpperCamelCase__ ) # wrap to test picklability of the schedule A__ : Tuple =unwrap_and_save_reload_schedule(UpperCamelCase__ , self.num_steps ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ , msg=F'''failed for {scheduler_func} in save and reload''' ) class __lowerCAmelCase : '''simple docstring''' def __init__( self : int , UpperCamelCase__ : str ): A__ : int =fn def __call__( self : List[Any] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : List[Any] ): return self.fn(*UpperCamelCase__ , **UpperCamelCase__ ) @classmethod def _UpperCAmelCase ( self : Dict , 
UpperCamelCase__ : Dict ): A__ : str =list(map(self , scheduler.lr_lambdas ) )
656
0
"""Weighted disjoint-set (union-find) with union by rank, path compression,
and running track of the largest set size."""


class a__:
    """Disjoint-set over `len(set_counts)` elements.

    `set_counts[i]` is the initial size of element i's set; `max_set` always
    holds the size of the largest set seen so far.
    """

    def __init__(self, set_counts) -> None:
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        # Every element starts as the root of its own singleton set.
        self.parents = list(range(num_sets))

    def merge(self, src, dst) -> bool:
        """Union the sets containing `src` and `dst`.

        Returns False if they were already in the same set, True otherwise.
        The smaller-rank root is attached under the larger-rank root; the
        absorbed root's count is zeroed and `max_set` is updated.
        """
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set) -> int:
        """Find the root of `disj_set`, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
90
"""simple docstring""" import argparse import torch from transformers import ( SpeechTaConfig, SpeechTaFeatureExtractor, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaProcessor, SpeechTaTokenizer, logging, ) from transformers.tokenization_utils import AddedToken logging.set_verbosity_info() __A : List[Any] = logging.get_logger("transformers.models.speecht5") __A : Optional[Any] = { "speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm", "speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection", "speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv", "speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed", } __A : Optional[int] = { "text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens", "text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha", } __A : List[str] = { "speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0", "speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1", "speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer", "speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha", "speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer", } __A : List[Any] = { "speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out", "speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out", "speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv", "speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm", "speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv", "speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm", 
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv", "speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm", "speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv", "speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm", "speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv", "speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm", } __A : Union[str, Any] = { "text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens", } __A : Any = { "text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head", } __A : Union[str, Any] = { "encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj", "encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj", "encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj", "encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj", "encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm", "encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense", "encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense", "encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm", "encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm", "encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k", } __A : Optional[int] = { "decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj", "decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj", "decoder.layers.*.self_attn.q_proj": 
"speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj", "decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj", "decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm", "decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj", "decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj", "decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj", "decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj", "decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm", "decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense", "decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense", "decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm", } __A : Union[str, Any] = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_TEXT_DECODER_PRENET, **MAPPING_TEXT_DECODER_POSTNET, } __A : Optional[Any] = { **MAPPING_TEXT_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } __A : Optional[int] = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } __A : int = [] __A : int = [ "encoder.version", "encoder.layers.*.norm_k.weight", "encoder.layers.*.norm_k.bias", "decoder.version", "decoder.layers.*.norm_k.weight", "decoder.layers.*.norm_k.bias", "decoder.pos_emb.pe_k", "speech_encoder_prenet.embed_positions._float_tensor", "text_decoder_prenet.embed_positions._float_tensor", ] __A : Optional[Any] = IGNORE_KEYS + [ "encoder.proj", 
"text_encoder_prenet.*", "speech_decoder_prenet.*", "speech_decoder_postnet.*", ] __A : Tuple = IGNORE_KEYS + [ "encoder.proj", "speech_encoder_prenet.*", "text_decoder_prenet.*", "text_decoder_postnet.*", ] __A : Union[str, Any] = IGNORE_KEYS + [ "encoder.proj", "text_encoder_prenet.*", "text_decoder_prenet.*", "text_decoder_postnet.*", ] def lowercase ( UpperCamelCase : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : List[Any] , UpperCamelCase : int ): """simple docstring""" for attribute in key.split("." ): A__ : Dict =getattr(UpperCamelCase , UpperCamelCase ) if weight_type is not None: A__ : Union[str, Any] =getattr(UpperCamelCase , UpperCamelCase ).shape else: A__ : Tuple =hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": A__ : Any =value elif weight_type == "weight_g": A__ : Any =value elif weight_type == "weight_v": A__ : Any =value elif weight_type == "bias": A__ : Tuple =value elif weight_type == "running_mean": A__ : Dict =value elif weight_type == "running_var": A__ : List[str] =value elif weight_type == "num_batches_tracked": A__ : Dict =value else: A__ : Optional[int] =value logger.info(F'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''' ) def lowercase ( UpperCamelCase : Tuple , UpperCamelCase : Tuple ): """simple docstring""" for key in ignore_keys: if key.endswith(".*" ): if name.startswith(key[:-1] ): return True elif ".*." in key: A__ , A__ : List[str] =key.split(".*." 
) if prefix in name and suffix in name: return True elif key in name: return True return False def lowercase ( UpperCamelCase : Dict , UpperCamelCase : Optional[int] , UpperCamelCase : Dict ): """simple docstring""" A__ : Tuple =[] if task == "s2t": A__ : Dict =hf_model.speechta.encoder.prenet.feature_encoder A__ : int =MAPPING_S2T A__ : List[Any] =IGNORE_KEYS_S2T elif task == "t2s": A__ : Union[str, Any] =None A__ : List[Any] =MAPPING_T2S A__ : Tuple =IGNORE_KEYS_T2S elif task == "s2s": A__ : Optional[Any] =hf_model.speechta.encoder.prenet.feature_encoder A__ : Tuple =MAPPING_S2S A__ : Any =IGNORE_KEYS_S2S else: raise ValueError(F'''Unsupported task: {task}''' ) for name, value in fairseq_dict.items(): if should_ignore(UpperCamelCase , UpperCamelCase ): logger.info(F'''{name} was ignored''' ) continue A__ : Optional[Any] =False if "conv_layers" in name: load_conv_layer( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , hf_model.config.feat_extract_norm == "group" , ) A__ : List[Any] =True else: for key, mapped_key in MAPPING.items(): # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if "*" in key: A__ , A__ : Dict =key.split(".*." ) if prefix in name and suffix in name: A__ : int =suffix # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: if key in name: A__ : List[Any] =True if "*" in mapped_key: A__ : Optional[int] =name.split(UpperCamelCase )[0].split("." 
)[-2] A__ : int =mapped_key.replace("*" , UpperCamelCase ) if "weight_g" in name: A__ : str ="weight_g" elif "weight_v" in name: A__ : Optional[Any] ="weight_v" elif "bias" in name: A__ : Any ="bias" elif "weight" in name: A__ : Optional[int] ="weight" elif "running_mean" in name: A__ : Tuple ="running_mean" elif "running_var" in name: A__ : Optional[int] ="running_var" elif "num_batches_tracked" in name: A__ : str ="num_batches_tracked" else: A__ : List[Any] =None set_recursively(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) continue if not is_used: unused_weights.append(UpperCamelCase ) logger.warning(F'''Unused weights: {unused_weights}''' ) def lowercase ( UpperCamelCase : Dict , UpperCamelCase : Dict , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : Dict ): """simple docstring""" A__ : Any =full_name.split("conv_layers." )[-1] A__ : Dict =name.split("." ) A__ : int =int(items[0] ) A__ : str =int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) A__ : Optional[Any] =value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) A__ : Optional[int] =value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, 
but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) A__ : Any =value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) A__ : Any =value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(UpperCamelCase ) @torch.no_grad() def lowercase ( UpperCamelCase : Any , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[str] , UpperCamelCase : str=None , UpperCamelCase : Any=None , UpperCamelCase : Tuple=None , ): """simple docstring""" if config_path is not None: A__ : Any =SpeechTaConfig.from_pretrained(UpperCamelCase ) else: A__ : Any =SpeechTaConfig() if task == "s2t": A__ : Union[str, Any] =config.max_text_positions A__ : Dict =SpeechTaForSpeechToText(UpperCamelCase ) elif task == "t2s": A__ : str =1876 A__ : Optional[int] =600 A__ : Tuple =config.max_speech_positions A__ : Optional[Any] =SpeechTaForTextToSpeech(UpperCamelCase ) elif task == "s2s": A__ : str =1876 A__ : Tuple =config.max_speech_positions A__ : Any =SpeechTaForSpeechToSpeech(UpperCamelCase ) else: raise ValueError(F'''Unknown task name: {task}''' ) if vocab_path: A__ : str =SpeechTaTokenizer(UpperCamelCase , model_max_length=config.max_text_positions ) # Mask token behaves like a normal word, i.e. 
include the space before it A__ : Optional[Any] =AddedToken("<mask>" , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) A__ : int =mask_token tokenizer.add_special_tokens({"mask_token": mask_token} ) tokenizer.add_tokens(["<ctc_blank>"] ) A__ : Dict =SpeechTaFeatureExtractor() A__ : Tuple =SpeechTaProcessor(tokenizer=UpperCamelCase , feature_extractor=UpperCamelCase ) processor.save_pretrained(UpperCamelCase ) A__ : Union[str, Any] =torch.load(UpperCamelCase ) recursively_load_weights(fairseq_checkpoint["model"] , UpperCamelCase , UpperCamelCase ) model.save_pretrained(UpperCamelCase ) if repo_id: print("Pushing to the hub..." ) processor.push_to_hub(UpperCamelCase ) model.push_to_hub(UpperCamelCase ) if __name__ == "__main__": __A : Dict = argparse.ArgumentParser() parser.add_argument( "--task", default="s2t", type=str, help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.", ) parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model." ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) __A : str = parser.parse_args() convert_speechta_checkpoint( args.task, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.vocab_path, args.push_to_hub, )
656
0
"""simple docstring""" from math import factorial def _snake_case ( snake_case__ : int , snake_case__ : int , snake_case__ : float ): if successes > trials: raise ValueError('successes must be lower or equal to trials' ) if trials < 0 or successes < 0: raise ValueError('the function is defined for non-negative integers' ) if not isinstance(snake_case__ , snake_case__ ) or not isinstance(snake_case__ , snake_case__ ): raise ValueError('the function is defined for non-negative integers' ) if not 0 < prob < 1: raise ValueError('prob has to be in range of 1 - 0' ) A = (prob**successes) * ((1 - prob) ** (trials - successes)) # Calculate the binomial coefficient: n! / k!(n-k)! A = float(factorial(snake_case__ ) ) coefficient /= factorial(snake_case__ ) * factorial(trials - successes ) return probability * coefficient if __name__ == "__main__": from doctest import testmod testmod() print('''Probability of 2 successes out of 4 trails''') print('''with probability of 0.75 is:''', end=''' ''') print(binomial_distribution(2, 4, 0.75))
91
"""simple docstring""" from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase): '''simple docstring''' __magic_name__ : List[Any] = [R"""h\.\d+\.attn\.bias""", R"""h\.\d+\.attn\.masked_bias"""] @register_to_config def __init__( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : int = 50257 , UpperCamelCase__ : int = 1024 , UpperCamelCase__ : int = 768 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : str = "gelu_new" , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : float = 1E-5 , UpperCamelCase__ : float = 0.02 , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , ): super().__init__() A__ : Dict =prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( F'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and''' F''' `n_embd`: {n_embd} are not equal.''' ) A__ : Optional[int] =prefix_inner_dim A__ : Optional[int] =prefix_hidden_dim A__ : Optional[int] =( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim ) if self.prefix_hidden_dim is not None else nn.Identity() ) A__ : Optional[int] =( nn.Linear(self.prefix_hidden_dim , UpperCamelCase__ ) if self.prefix_hidden_dim is not None else nn.Identity() ) A__ : str =GPTaConfig( vocab_size=UpperCamelCase__ , n_positions=UpperCamelCase__ , n_embd=UpperCamelCase__ , n_layer=UpperCamelCase__ , n_head=UpperCamelCase__ , n_inner=UpperCamelCase__ 
, activation_function=UpperCamelCase__ , resid_pdrop=UpperCamelCase__ , embd_pdrop=UpperCamelCase__ , attn_pdrop=UpperCamelCase__ , layer_norm_epsilon=UpperCamelCase__ , initializer_range=UpperCamelCase__ , scale_attn_weights=UpperCamelCase__ , use_cache=UpperCamelCase__ , scale_attn_by_inverse_layer_idx=UpperCamelCase__ , reorder_and_upcast_attn=UpperCamelCase__ , ) A__ : Any =GPTaLMHeadModel(UpperCamelCase__ ) def _UpperCAmelCase ( self : Any , UpperCamelCase__ : torch.Tensor , UpperCamelCase__ : torch.Tensor , UpperCamelCase__ : Optional[torch.Tensor] = None , UpperCamelCase__ : Optional[torch.Tensor] = None , ): A__ : int =self.transformer.transformer.wte(UpperCamelCase__ ) A__ : Tuple =self.encode_prefix(UpperCamelCase__ ) A__ : Union[str, Any] =self.decode_prefix(UpperCamelCase__ ) A__ : Tuple =torch.cat((prefix_embeds, embedding_text) , dim=1 ) if labels is not None: A__ : Any =self.get_dummy_token(input_ids.shape[0] , input_ids.device ) A__ : List[Any] =torch.cat((dummy_token, input_ids) , dim=1 ) A__ : Any =self.transformer(inputs_embeds=UpperCamelCase__ , labels=UpperCamelCase__ , attention_mask=UpperCamelCase__ ) if self.prefix_hidden_dim is not None: return out, hidden else: return out def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : torch.device ): return torch.zeros(UpperCamelCase__ , self.prefix_length , dtype=torch.intaa , device=UpperCamelCase__ ) def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : Tuple ): return self.encode_prefix(UpperCamelCase__ ) @torch.no_grad() def _UpperCAmelCase ( self : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : str ): A__ : Optional[int] =torch.split(UpperCamelCase__ , 1 , dim=0 ) A__ : List[str] =[] A__ : Dict =[] for feature in features: A__ : Any =self.decode_prefix(feature.to(UpperCamelCase__ ) ) # back to the clip feature # Only support beam search for now A__ , A__ : Optional[Any] =self.generate_beam( 
input_embeds=UpperCamelCase__ , device=UpperCamelCase__ , eos_token_id=UpperCamelCase__ ) generated_tokens.append(output_tokens[0] ) generated_seq_lengths.append(seq_lengths[0] ) A__ : Optional[Any] =torch.stack(UpperCamelCase__ ) A__ : Optional[int] =torch.stack(UpperCamelCase__ ) return generated_tokens, generated_seq_lengths @torch.no_grad() def _UpperCAmelCase ( self : List[Any] , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : int = 5 , UpperCamelCase__ : int = 67 , UpperCamelCase__ : float = 1.0 , UpperCamelCase__ : Optional[int] = None , ): A__ : str =eos_token_id A__ : Optional[Any] =None A__ : int =None A__ : Union[str, Any] =torch.ones(UpperCamelCase__ , device=UpperCamelCase__ , dtype=torch.int ) A__ : Any =torch.zeros(UpperCamelCase__ , device=UpperCamelCase__ , dtype=torch.bool ) if input_embeds is not None: A__ : Union[str, Any] =input_embeds else: A__ : Optional[Any] =self.transformer.transformer.wte(UpperCamelCase__ ) for i in range(UpperCamelCase__ ): A__ : Optional[int] =self.transformer(inputs_embeds=UpperCamelCase__ ) A__ : Tuple =outputs.logits A__ : Union[str, Any] =logits[:, -1, :] / (temperature if temperature > 0 else 1.0) A__ : Optional[Any] =logits.softmax(-1 ).log() if scores is None: A__ , A__ : Union[str, Any] =logits.topk(UpperCamelCase__ , -1 ) A__ : Union[str, Any] =generated.expand(UpperCamelCase__ , *generated.shape[1:] ) A__ , A__ : Optional[int] =next_tokens.permute(1 , 0 ), scores.squeeze(0 ) if tokens is None: A__ : str =next_tokens else: A__ : Optional[Any] =tokens.expand(UpperCamelCase__ , *tokens.shape[1:] ) A__ : str =torch.cat((tokens, next_tokens) , dim=1 ) else: A__ : Union[str, Any] =-float(np.inf ) A__ : Dict =0 A__ : Optional[Any] =scores[:, None] + logits seq_lengths[~is_stopped] += 1 A__ : Optional[Any] =scores_sum / seq_lengths[:, None] A__ , A__ : List[Any] =scores_sum_average.view(-1 ).topk(UpperCamelCase__ , -1 ) A__ : Tuple 
=next_tokens // scores_sum.shape[1] A__ : List[Any] =seq_lengths[next_tokens_source] A__ : int =next_tokens % scores_sum.shape[1] A__ : str =next_tokens.unsqueeze(1 ) A__ : List[Any] =tokens[next_tokens_source] A__ : int =torch.cat((tokens, next_tokens) , dim=1 ) A__ : List[str] =generated[next_tokens_source] A__ : Optional[Any] =scores_sum_average * seq_lengths A__ : Optional[int] =is_stopped[next_tokens_source] A__ : List[str] =self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 ) A__ : str =torch.cat((generated, next_token_embed) , dim=1 ) A__ : str =is_stopped + next_tokens.eq(UpperCamelCase__ ).squeeze() if is_stopped.all(): break A__ : Optional[int] =scores / seq_lengths A__ : List[Any] =scores.argsort(descending=UpperCamelCase__ ) # tokens tensors are already padded to max_seq_length A__ : int =[tokens[i] for i in order] A__ : Any =torch.stack(UpperCamelCase__ , dim=0 ) A__ : int =torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype ) return output_texts, seq_lengths
656
0
"""Least-recently-used cache backed by a doubly linked list (O(1) get/put)."""
from typing import Callable, Generic, Optional, TypeVar

T = TypeVar("T")
U = TypeVar("U")


class DoubleLinkedListNode(Generic[T, U]):
    """A list node holding one key/value pair plus prev/next links."""

    def __init__(self, key: Optional[T], val: Optional[U]) -> None:
        self.key = key
        self.val = val
        self.next: Optional["DoubleLinkedListNode[T, U]"] = None
        self.prev: Optional["DoubleLinkedListNode[T, U]"] = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


class DoubleLinkedList(Generic[T, U]):
    """Doubly linked list with head/rear sentinel nodes.

    The node right after ``head`` is the least recently used entry;
    the node right before ``rear`` is the most recently used one.
    """

    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        # Wire the two sentinels to each other: head <-> rear.
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n ".join(rep)

    def add(self, node: "DoubleLinkedListNode[T, U]") -> None:
        """Insert ``node`` just before the rear sentinel (most recent slot)."""
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have a non-None prev.
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(
        self, node: "DoubleLinkedListNode[T, U]"
    ) -> Optional["DoubleLinkedListNode[T, U]"]:
        """Unlink ``node`` and return it, or None if it is not linked."""
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    """LRU cache with O(1) get/put, hit/miss counters and a decorator helper."""

    # Shared map: decorated function -> the LRUCache instance memoizing it.
    decorator_function_to_instance_map: dict = {}

    def __init__(self, capacity: int) -> None:
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> Optional[U]:
        """Return the cached value for ``key`` (marking it most recent) or None."""
        # Note: a pythonic interface would raise KeyError rather than return None.
        if key in self.cache:
            self.hits += 1
            value_node = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache.
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        """Insert or update ``key``; evict the least recently used entry if full."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # Evict the oldest entry: the first real node after the head sentinel.
                first_node = self.list.head.next
                assert first_node is not None
                assert first_node.key is not None
                assert self.list.remove(first_node) is not None
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # Bump the node to the most-recent end and update its value.
            node = self.list.remove(self.cache[key])
            assert node is not None
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128) -> Callable:
        """Return a decorator memoizing a one-positional-argument function."""

        def cache_decorator_inner(func: Callable) -> Callable:
            def cache_decorator_wrapper(*args):
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> "LRUCache[T, U]":
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner


if __name__ == "__main__":
    import doctest

    doctest.testmod()
92
"""Project Euler 18/67: maximum top-to-bottom path sum in a number triangle."""
import os


def solution() -> int:
    """Read ``triangle.txt`` located next to this script and return the
    maximum sum obtainable walking from the apex to the base, moving to an
    adjacent number on each row.

    Returns:
        The maximum path sum as an int.

    Raises:
        OSError: if ``triangle.txt`` cannot be opened.
        ValueError: if the file contains non-integer tokens.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")
    # Use a context manager so the file handle is always closed.
    with open(triangle_path) as f:
        lines = f.readlines()

    triangle = []
    for line in lines:
        triangle.append([int(number) for number in line.strip().split(" ")])

    # Bottom-up is unnecessary: accumulate top-down, each cell taking the
    # best of the two parents above it (0 when a parent is out of range).
    for i in range(1, len(triangle)):
        for j in range(len(triangle[i])):
            upper_left = triangle[i - 1][j - 1] if j > 0 else 0
            upper_right = triangle[i - 1][j] if j != len(triangle[i - 1]) else 0
            triangle[i][j] += max(upper_left, upper_right)
    return max(triangle[-1])


# Backward-compatible alias for the previous (obfuscated) name.
lowercase = solution


if __name__ == "__main__":
    print(solution())
656
0
"""simple docstring""" def __A (_SCREAMING_SNAKE_CASE = 6008_5147_5143 ) ->int: """simple docstring""" try: lowerCAmelCase__ :Optional[int] = int(_SCREAMING_SNAKE_CASE ) except (TypeError, ValueError): raise TypeError('Parameter n must be int or castable to int.' ) if n <= 0: raise ValueError('Parameter n must be greater than or equal to one.' ) lowerCAmelCase__ :List[Any] = 1 lowerCAmelCase__ :Optional[Any] = 2 while i * i <= n: while n % i == 0: lowerCAmelCase__ :Any = i n //= i i += 1 if n > 1: lowerCAmelCase__ :Any = n return int(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": print(F'''{solution() = }''')
93
"""simple docstring""" import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() __A : int = logging.get_logger(__name__) def lowercase ( UpperCamelCase : Any ): """simple docstring""" A__ : str =OrderedDict() for key, value in state_dict.items(): if key.startswith("module.encoder" ): A__ : Dict =key.replace("module.encoder" , "glpn.encoder" ) if key.startswith("module.decoder" ): A__ : Optional[int] =key.replace("module.decoder" , "decoder.stages" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 A__ : Tuple =key[key.find("patch_embed" ) + len("patch_embed" )] A__ : Optional[Any] =key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(UpperCamelCase )-1}''' ) if "norm" in key: A__ : Dict =key.replace("norm" , "layer_norm" ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 A__ : Any =key[key.find("glpn.encoder.layer_norm" ) + len("glpn.encoder.layer_norm" )] A__ : Tuple =key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(UpperCamelCase )-1}''' ) if "layer_norm1" in key: A__ : List[Any] =key.replace("layer_norm1" , "layer_norm_1" ) if "layer_norm2" in key: A__ : Optional[int] =key.replace("layer_norm2" , "layer_norm_2" ) if "block" in key: # replace for example block1 by block.0 A__ : int =key[key.find("block" ) + len("block" )] A__ : Optional[Any] =key.replace(F'''block{idx}''' , F'''block.{int(UpperCamelCase )-1}''' ) if "attn.q" in key: A__ : Optional[Any] =key.replace("attn.q" , "attention.self.query" ) if "attn.proj" in key: A__ : Union[str, Any] =key.replace("attn.proj" , "attention.output.dense" ) if "attn" in key: A__ : str =key.replace("attn" , "attention.self" ) if "fc1" in key: A__ : Dict =key.replace("fc1" , "dense1" ) if "fc2" in key: A__ : str 
=key.replace("fc2" , "dense2" ) if "linear_pred" in key: A__ : List[Any] =key.replace("linear_pred" , "classifier" ) if "linear_fuse" in key: A__ : List[str] =key.replace("linear_fuse.conv" , "linear_fuse" ) A__ : Any =key.replace("linear_fuse.bn" , "batch_norm" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 A__ : str =key[key.find("linear_c" ) + len("linear_c" )] A__ : Dict =key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(UpperCamelCase )-1}''' ) if "bot_conv" in key: A__ : Union[str, Any] =key.replace("bot_conv" , "0.convolution" ) if "skip_conv1" in key: A__ : List[Any] =key.replace("skip_conv1" , "1.convolution" ) if "skip_conv2" in key: A__ : int =key.replace("skip_conv2" , "2.convolution" ) if "fusion1" in key: A__ : Optional[Any] =key.replace("fusion1" , "1.fusion" ) if "fusion2" in key: A__ : Optional[Any] =key.replace("fusion2" , "2.fusion" ) if "fusion3" in key: A__ : int =key.replace("fusion3" , "3.fusion" ) if "fusion" in key and "conv" in key: A__ : List[str] =key.replace("conv" , "convolutional_layer" ) if key.startswith("module.last_layer_depth" ): A__ : Tuple =key.replace("module.last_layer_depth" , "head.head" ) A__ : int =value return new_state_dict def lowercase ( UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict ): """simple docstring""" # for each of the encoder blocks: for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) A__ : int =state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' ) A__ : str =state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' ) # next, add keys and values (in that order) to the state dict A__ : List[str] =kv_weight[ : config.hidden_sizes[i], : ] A__ : Dict =kv_bias[: config.hidden_sizes[i]] A__ : Any =kv_weight[ config.hidden_sizes[i] :, : ] A__ : Any =kv_bias[config.hidden_sizes[i] :] def lowercase ( ): """simple 
docstring""" A__ : Optional[Any] ="http://images.cocodataset.org/val2017/000000039769.jpg" A__ : List[Any] =Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw ) return image @torch.no_grad() def lowercase ( UpperCamelCase : str , UpperCamelCase : Tuple , UpperCamelCase : List[str]=False , UpperCamelCase : str=None ): """simple docstring""" A__ : List[str] =GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] ) # load image processor (only resize + rescale) A__ : str =GLPNImageProcessor() # prepare image A__ : Any =prepare_img() A__ : Optional[int] =image_processor(images=UpperCamelCase , return_tensors="pt" ).pixel_values logger.info("Converting model..." ) # load original state dict A__ : int =torch.load(UpperCamelCase , map_location=torch.device("cpu" ) ) # rename keys A__ : Union[str, Any] =rename_keys(UpperCamelCase ) # key and value matrices need special treatment read_in_k_v(UpperCamelCase , UpperCamelCase ) # create HuggingFace model and load state dict A__ : Optional[int] =GLPNForDepthEstimation(UpperCamelCase ) model.load_state_dict(UpperCamelCase ) model.eval() # forward pass A__ : int =model(UpperCamelCase ) A__ : Optional[Any] =outputs.predicted_depth # verify output if model_name is not None: if "nyu" in model_name: A__ : List[Any] =torch.tensor( [[4.41_47, 4.08_73, 4.06_73], [3.78_90, 3.28_81, 3.15_25], [3.76_74, 3.54_23, 3.49_13]] ) elif "kitti" in model_name: A__ : Tuple =torch.tensor( [[3.42_91, 2.78_65, 2.51_51], [3.28_41, 2.70_21, 2.35_02], [3.11_47, 2.46_25, 2.24_81]] ) else: raise ValueError(F'''Unknown model name: {model_name}''' ) A__ : str =torch.Size([1, 480, 640] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3] , UpperCamelCase , atol=1E-4 ) print("Looks ok!" ) # finally, push to hub if required if push_to_hub: logger.info("Pushing model and image processor to the hub..." 
) model.push_to_hub( repo_path_or_name=Path(UpperCamelCase , UpperCamelCase ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=UpperCamelCase , ) image_processor.push_to_hub( repo_path_or_name=Path(UpperCamelCase , UpperCamelCase ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=UpperCamelCase , ) if __name__ == "__main__": __A : List[str] = argparse.ArgumentParser() parser.add_argument( "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub." ) parser.add_argument( "--model_name", default="glpn-kitti", type=str, help="Name of the model in case you're pushing to the hub.", ) __A : Any = parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
656
0
"""Project Euler 25: index of the first Fibonacci term containing n digits."""


def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number (fib(2) == 1, fib(12) == 144).

    Returns 0 for n == 1 or non-int input, mirroring the original contract.
    """
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the smallest index whose Fibonacci number has at least n digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term containing ``n`` digits."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
94
"""simple docstring""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging __A : Any = logging.get_logger(__name__) __A : Optional[Any] = { "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json", # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class __lowerCAmelCase ( _UpperCamelCase): '''simple docstring''' __magic_name__ : Union[str, Any] = """gpt_neo""" __magic_name__ : Union[str, Any] = ["""past_key_values"""] __magic_name__ : Dict = {"""num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""} def __init__( self : Dict , UpperCamelCase__ : List[Any]=50257 , UpperCamelCase__ : Optional[Any]=2048 , UpperCamelCase__ : Tuple=2048 , UpperCamelCase__ : int=24 , UpperCamelCase__ : Dict=[[["global", "local"], 12]] , UpperCamelCase__ : Optional[Any]=16 , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : str=256 , UpperCamelCase__ : List[str]="gelu_new" , UpperCamelCase__ : List[str]=0.0 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : str=0.0 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : List[str]=1E-5 , UpperCamelCase__ : Any=0.02 , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Optional[Any]=50256 , UpperCamelCase__ : List[str]=50256 , **UpperCamelCase__ : str , ): A__ : Optional[Any] =vocab_size A__ : Dict =max_position_embeddings A__ : List[str] =hidden_size A__ : List[Any] =num_layers A__ : Tuple =num_heads A__ : List[str] =intermediate_size A__ : Tuple =window_size A__ : Dict =activation_function A__ : str =resid_dropout A__ : Union[str, Any] =embed_dropout A__ : List[str] =attention_dropout A__ : Tuple =classifier_dropout A__ : int =layer_norm_epsilon A__ : int =initializer_range A__ : str =use_cache A__ : Tuple =bos_token_id A__ 
: int =eos_token_id A__ : int =attention_types A__ : Any =self.expand_attention_types_params(UpperCamelCase__ ) if len(self.attention_layers ) != self.num_layers: raise ValueError( "Configuration for convolutional module is incorrect. " "It is required that `len(config.attention_layers)` == `config.num_layers` " F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, ''' F'''`config.num_layers = {self.num_layers}`. ''' "`config.attention_layers` is prepared using `config.attention_types`. " "Please verify the value of `config.attention_types` argument." ) super().__init__(bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ ) @staticmethod def _UpperCAmelCase ( UpperCamelCase__ : List[str] ): A__ : Optional[Any] =[] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def lowercase ( UpperCamelCase : List[str] , UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] ): """simple docstring""" import torch A__ : List[str] =input.size() A__ : Dict =len(UpperCamelCase ) A__ : Optional[int] =shape[dimension] A__ : str =torch.arange(0 , UpperCamelCase , UpperCamelCase ) A__ : Optional[int] =torch.div(sizedim - size , UpperCamelCase , rounding_mode="floor" ) + 1 A__ : str =torch.arange(UpperCamelCase ) + low_indices[:min_length][:, None] A__ : Tuple =[slice(UpperCamelCase )] * rank A__ : int =indices A__ : Optional[int] =input[s] A__ : Union[str, Any] =list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(UpperCamelCase ) def lowercase ( UpperCamelCase : str , UpperCamelCase : Any ): """simple docstring""" import torch A__ : List[str] =torch.arange(1 , UpperCamelCase ) A__ : List[Any] =torch.remainder(UpperCamelCase , UpperCamelCase ) A__ : Optional[int] =remainders == 0 A__ : str =candidates[divisor_indices] A__ : int =torch.max(UpperCamelCase ) return largest_divisor, torch.div(UpperCamelCase , 
UpperCamelCase , rounding_mode="floor" ) class __lowerCAmelCase ( _UpperCamelCase): '''simple docstring''' @property def _UpperCAmelCase ( self : List[Any] ): A__ : Optional[int] =OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} ) if self.use_past: self.fill_with_past_key_values_(UpperCamelCase__ , direction="inputs" ) A__ : Optional[int] ={0: "batch", 1: "past_sequence + sequence"} else: A__ : Tuple ={0: "batch", 1: "sequence"} return common_inputs @property def _UpperCAmelCase ( self : List[str] ): return self._config.num_heads def _UpperCAmelCase ( self : int , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ): A__ : Union[str, Any] =super(UpperCamelCase__ , self ).generate_dummy_inputs( UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ ) # We need to order the input in the way they appears in the forward() A__ : List[Any] =OrderedDict({"input_ids": common_inputs["input_ids"]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch A__ , A__ : Union[str, Any] =common_inputs["input_ids"].shape # Not using the same length for past_key_values A__ : Union[str, Any] =seqlen + 2 A__ : List[Any] =( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) A__ : Optional[Any] =[ (torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) for _ in range(self.num_layers ) ] A__ : Optional[Any] =common_inputs["attention_mask"] if self.use_past: A__ : Any =ordered_inputs["attention_mask"].dtype A__ : Tuple =torch.cat( [ordered_inputs["attention_mask"], torch.ones(UpperCamelCase__ , UpperCamelCase__ , dtype=UpperCamelCase__ )] , dim=1 ) return ordered_inputs @property def _UpperCAmelCase ( self : List[str] ): return 13
656
0
"""Property-style tests checking that HashMap behaves exactly like dict."""
from operator import delitem, getitem, setitem

import pytest

from data_structures.hashing.hash_map import HashMap


def _get(k):
    """Describe a read of key ``k`` as an (operation, *args) tuple."""
    return getitem, k


def _set(k, v):
    """Describe a write of ``k -> v`` as an (operation, *args) tuple."""
    return setitem, k, v


def _del(k):
    """Describe a deletion of key ``k`` as an (operation, *args) tuple."""
    return delitem, k


def _run_operation(obj, fun, *args):
    """Apply ``fun`` to ``obj``; return (result, None) or (None, exception)."""
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e


_add_items = (
    _set('key_a', 'val_a'),
    _set('key_b', 'val_b'),
)

_overwrite_items = [
    _set('key_a', 'val_a'),
    _set('key_a', 'val_b'),
]

_delete_items = [
    _set('key_a', 'val_a'),
    _set('key_b', 'val_b'),
    _del('key_a'),
    _del('key_b'),
    _set('key_a', 'val_a'),
    _del('key_a'),
]

_access_absent_items = [
    _get('key_a'),
    _del('key_a'),
    _set('key_a', 'val_a'),
    _del('key_a'),
    _del('key_a'),
    _get('key_a'),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set('key_a', 'val_b'),
]


@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    """Replay the same operation script on HashMap and dict; results must agree."""
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    """HashMap must not expose public names beyond those dict provides."""

    def is_public(name) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
95
"""Megatron-BERT model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


__A : Union[str, Any] = logging.get_logger(__name__)

# NOTE(review): this rebinds __A, shadowing the logger above — the logger is
# unused afterward in this module so behavior is unaffected, but the two
# globals should ideally have distinct names.
__A : Any = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class __lowerCAmelCase(PretrainedConfig):
    """Configuration class storing the hyper-parameters of a Megatron-BERT model.

    Instantiating with the defaults yields a configuration similar to the
    large Megatron-BERT architecture (1024 hidden / 24 layers / 16 heads).
    """

    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
656
0
"""Lazy-import scaffold for the ONNX export subpackage: heavy submodules are
only imported on first attribute access via ``_LazyModule``."""
from typing import TYPE_CHECKING

from ..utils import _LazyModule


# Maps submodule name -> list of public names exported from it.
_import_structure = {
    'config': [
        'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
        'OnnxConfig',
        'OnnxConfigWithPast',
        'OnnxSeq2SeqConfigWithPast',
        'PatchingSpec',
    ],
    'convert': ['export', 'validate_model_outputs'],
    'features': ['FeaturesManager'],
    'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}

# Backward-compatible alias for the previous (obfuscated) name.
__lowerCamelCase = _import_structure

if TYPE_CHECKING:
    # Real imports for static type checkers only; never executed at runtime.
    # NOTE(review): the imported name `OnnxSeqaSeqConfigWithPast` does not match
    # the declared export 'OnnxSeq2SeqConfigWithPast' above — presumably a
    # mangling artifact; confirm against the `config` submodule.
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeqaSeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    # Install the lazy proxy in place of this module so submodules are
    # imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
96
"""Check whether a sequence of side lengths can form a valid polygon."""
from __future__ import annotations


def lowercase(UpperCamelCase: list[float]) -> bool:
    """Return True iff the longest side is strictly shorter than the sum of
    all the other sides (the strict polygon inequality).

    Raises:
        ValueError: if fewer than two sides are given, or any side is <= 0.
    """
    if len(UpperCamelCase) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(side <= 0 for side in UpperCamelCase):
        raise ValueError("All values must be greater than 0")
    ordered = sorted(UpperCamelCase)
    # Compare the largest side against the sum of the remaining ones.
    return ordered[-1] < sum(ordered[:-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
656
0
from importlib import import_module from .logging import get_logger __a = get_logger(__name__) class lowercase__: """simple docstring""" def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int=None ) -> Tuple: lowercase_ = attrs or [] if module is not None: for key in module.__dict__: if key in attrs or not key.startswith('''__''' ): setattr(self , SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) lowercase_ = module._original_module if isinstance(SCREAMING_SNAKE_CASE_ , _PatchedModuleObj ) else module class lowercase__: """simple docstring""" a :str = [] def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int=None ) -> Optional[Any]: lowercase_ = obj lowercase_ = target lowercase_ = new lowercase_ = target.split('''.''' )[0] lowercase_ = {} lowercase_ = attrs or [] def __enter__( self : int ) -> Dict: *lowercase_ , lowercase_ = self.target.split('''.''' ) # Patch modules: # it's used to patch attributes of submodules like "os.path.join"; # in this case we need to patch "os" and "os.path" for i in range(len(SCREAMING_SNAKE_CASE_ ) ): try: lowercase_ = import_module('''.'''.join(submodules[: i + 1] ) ) except ModuleNotFoundError: continue # We iterate over all the globals in self.obj in case we find "os" or "os.path" for attr in self.obj.__dir__(): lowercase_ = getattr(self.obj , SCREAMING_SNAKE_CASE_ ) # We don't check for the name of the global, but rather if its value *is* "os" or "os.path". # This allows to patch renamed modules like "from os import path as ospath". 
if obj_attr is submodule or ( (isinstance(SCREAMING_SNAKE_CASE_ , _PatchedModuleObj ) and obj_attr._original_module is submodule) ): lowercase_ = obj_attr # patch at top level setattr(self.obj , SCREAMING_SNAKE_CASE_ , _PatchedModuleObj(SCREAMING_SNAKE_CASE_ , attrs=self.attrs ) ) lowercase_ = getattr(self.obj , SCREAMING_SNAKE_CASE_ ) # construct lower levels patches for key in submodules[i + 1 :]: setattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , _PatchedModuleObj(getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , attrs=self.attrs ) ) lowercase_ = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # finally set the target attribute setattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.new ) # Patch attribute itself: # it's used for builtins like "open", # and also to patch "os.path.join" we may also need to patch "join" # itself if it was imported as "from os.path import join". if submodules: # if it's an attribute of a submodule like "os.path.join" try: lowercase_ = getattr(import_module('''.'''.join(SCREAMING_SNAKE_CASE_ ) ) , SCREAMING_SNAKE_CASE_ ) except (AttributeError, ModuleNotFoundError): return # We iterate over all the globals in self.obj in case we find "os.path.join" for attr in self.obj.__dir__(): # We don't check for the name of the global, but rather if its value *is* "os.path.join". # This allows to patch renamed attributes like "from os.path import join as pjoin". 
if getattr(self.obj , SCREAMING_SNAKE_CASE_ ) is attr_value: lowercase_ = getattr(self.obj , SCREAMING_SNAKE_CASE_ ) setattr(self.obj , SCREAMING_SNAKE_CASE_ , self.new ) elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open" lowercase_ = globals()['''__builtins__'''][target_attr] setattr(self.obj , SCREAMING_SNAKE_CASE_ , self.new ) else: raise RuntimeError(f'''Tried to patch attribute {target_attr} instead of a submodule.''' ) def __exit__( self : Any , *SCREAMING_SNAKE_CASE_ : Dict ) -> List[Any]: for attr in list(self.original ): setattr(self.obj , SCREAMING_SNAKE_CASE_ , self.original.pop(SCREAMING_SNAKE_CASE_ ) ) def _lowercase ( self : Any ) -> str: self.__enter__() self._active_patches.append(self ) def _lowercase ( self : Optional[int] ) -> int: try: self._active_patches.remove(self ) except ValueError: # If the patch hasn't been started this will fail return None return self.__exit__()
97
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) __A : Optional[Any] = { "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Any = [ "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST", "MegaForCausalLM", "MegaForMaskedLM", "MegaForMultipleChoice", "MegaForQuestionAnswering", "MegaForSequenceClassification", "MegaForTokenClassification", "MegaModel", "MegaPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mega import ( MEGA_PRETRAINED_MODEL_ARCHIVE_LIST, MegaForCausalLM, MegaForMaskedLM, MegaForMultipleChoice, MegaForQuestionAnswering, MegaForSequenceClassification, MegaForTokenClassification, MegaModel, MegaPreTrainedModel, ) else: import sys __A : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
656
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tensorflow_text_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowercase__ : List[Any] = { 'configuration_bert': ['BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BertConfig', 'BertOnnxConfig'], 'tokenization_bert': ['BasicTokenizer', 'BertTokenizer', 'WordpieceTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : Union[str, Any] = ['BertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : str = [ 'BERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'BertForMaskedLM', 'BertForMultipleChoice', 'BertForNextSentencePrediction', 'BertForPreTraining', 'BertForQuestionAnswering', 'BertForSequenceClassification', 'BertForTokenClassification', 'BertLayer', 'BertLMHeadModel', 'BertModel', 'BertPreTrainedModel', 'load_tf_weights_in_bert', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : Optional[int] = [ 'TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFBertEmbeddings', 'TFBertForMaskedLM', 'TFBertForMultipleChoice', 'TFBertForNextSentencePrediction', 'TFBertForPreTraining', 'TFBertForQuestionAnswering', 'TFBertForSequenceClassification', 'TFBertForTokenClassification', 'TFBertLMHeadModel', 'TFBertMainLayer', 'TFBertModel', 'TFBertPreTrainedModel', ] try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : Tuple = ['TFBertTokenizer'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : Union[str, Any] = [ 'FlaxBertForCausalLM', 'FlaxBertForMaskedLM', 'FlaxBertForMultipleChoice', 
'FlaxBertForNextSentencePrediction', 'FlaxBertForPreTraining', 'FlaxBertForQuestionAnswering', 'FlaxBertForSequenceClassification', 'FlaxBertForTokenClassification', 'FlaxBertModel', 'FlaxBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_fast import BertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bert import ( BERT_PRETRAINED_MODEL_ARCHIVE_LIST, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLayer, BertLMHeadModel, BertModel, BertPreTrainedModel, load_tf_weights_in_bert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_bert import ( TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFBertEmbeddings, TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertForTokenClassification, TFBertLMHeadModel, TFBertMainLayer, TFBertModel, TFBertPreTrainedModel, ) try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_tf import TFBertTokenizer try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_bert import ( FlaxBertForCausalLM, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, 
FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, FlaxBertPreTrainedModel, ) else: import sys lowercase__ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
98
"""simple docstring""" def lowercase ( UpperCamelCase : int ): """simple docstring""" if num <= 0: raise ValueError("Input must be a positive integer" ) A__ : Union[str, Any] =[True] * (num + 1) A__ : Union[str, Any] =2 while p * p <= num: if primes[p]: for i in range(p * p , num + 1 , UpperCamelCase ): A__ : str =False p += 1 return [prime for prime in range(2 , num + 1 ) if primes[prime]] if __name__ == "__main__": import doctest doctest.testmod() __A : Optional[int] = int(input("Enter a positive integer: ").strip()) print(prime_sieve_eratosthenes(user_num))
656
0
from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # noqa F403 else: from 
.scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
99
"""simple docstring""" import pickle import unittest import torch from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils import require_cpu @require_cpu class __lowerCAmelCase ( unittest.TestCase): '''simple docstring''' def _UpperCAmelCase ( self : List[Any] ): A__ : Tuple =torch.nn.Linear(10 , 10 ) A__ : List[str] =torch.optim.SGD(model.parameters() , 0.1 ) A__ : Union[str, Any] =Accelerator() A__ : str =accelerator.prepare(UpperCamelCase__ ) try: pickle.loads(pickle.dumps(UpperCamelCase__ ) ) except Exception as e: self.fail(F'''Accelerated optimizer pickling failed with {e}''' ) AcceleratorState._reset_state()
656
0
import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class __snake_case : '''simple docstring''' def __init__( self , A_ , A_=13 , A_=10 , A_=3 , A_=2 , A_=2 , A_=True , A_=True , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=10 , A_=0.02 , A_="divided_space_time" , A_=None , ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = parent SCREAMING_SNAKE_CASE__ = batch_size SCREAMING_SNAKE_CASE__ = image_size SCREAMING_SNAKE_CASE__ = num_channels SCREAMING_SNAKE_CASE__ = patch_size SCREAMING_SNAKE_CASE__ = num_frames SCREAMING_SNAKE_CASE__ = is_training SCREAMING_SNAKE_CASE__ = use_labels SCREAMING_SNAKE_CASE__ = hidden_size SCREAMING_SNAKE_CASE__ = num_hidden_layers SCREAMING_SNAKE_CASE__ = num_attention_heads SCREAMING_SNAKE_CASE__ = intermediate_size SCREAMING_SNAKE_CASE__ = hidden_act SCREAMING_SNAKE_CASE__ = hidden_dropout_prob SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ = attention_type SCREAMING_SNAKE_CASE__ = initializer_range SCREAMING_SNAKE_CASE__ = scope SCREAMING_SNAKE_CASE__ = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * 
num_patches per frame + 1 CLS token SCREAMING_SNAKE_CASE__ = (image_size // patch_size) ** 2 SCREAMING_SNAKE_CASE__ = (num_frames) * self.num_patches_per_frame + 1 def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE__ = None if self.use_labels: SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.num_labels ) SCREAMING_SNAKE_CASE__ = self.get_config() return config, pixel_values, labels def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) SCREAMING_SNAKE_CASE__ = self.num_labels return config def lowercase_ ( self , A_ , A_ , A_ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = TimesformerModel(config=A_ ) model.to(A_ ) model.eval() SCREAMING_SNAKE_CASE__ = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase_ ( self , A_ , A_ , A_ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = TimesformerForVideoClassification(A_ ) model.to(A_ ) model.eval() SCREAMING_SNAKE_CASE__ = model(A_ ) # verify the logits shape SCREAMING_SNAKE_CASE__ = torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape , A_ ) def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 
SCREAMING_SNAKE_CASE__ = config_and_inputs SCREAMING_SNAKE_CASE__ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ : List[Any] = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () lowerCamelCase__ : Optional[int] = ( {"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification} if is_torch_available() else {} ) lowerCamelCase__ : List[str] = False lowerCamelCase__ : Any = False lowerCamelCase__ : Union[str, Any] = False lowerCamelCase__ : Dict = False def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = TimesformerModelTester(self ) SCREAMING_SNAKE_CASE__ = ConfigTester( self , config_class=A_ , has_text_modality=A_ , hidden_size=37 ) def lowercase_ ( self , A_ , A_ , A_=False ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = copy.deepcopy(A_ ) if return_labels: if model_class in get_values(A_ ): SCREAMING_SNAKE_CASE__ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=A_ ) return inputs_dict def lowercase_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''TimeSformer does not use inputs_embeds''' ) def lowercase_ ( self ): '''simple docstring''' pass def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ = model_class(A_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) SCREAMING_SNAKE_CASE__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(A_ , nn.Linear ) ) def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in 
self.all_model_classes: SCREAMING_SNAKE_CASE__ = model_class(A_ ) SCREAMING_SNAKE_CASE__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE__ = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE__ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , A_ ) def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*A_ ) @slow def lowercase_ ( self ): '''simple docstring''' for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE__ = TimesformerModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) def lowercase_ ( self ): '''simple docstring''' if not self.has_attentions: pass else: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ = True for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ = self.model_tester.seq_length SCREAMING_SNAKE_CASE__ = self.model_tester.num_frames SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = model_class(A_ ) model.to(A_ ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE__ = model(**self._prepare_for_class(A_ , A_ ) ) SCREAMING_SNAKE_CASE__ = outputs.attentions self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = model_class(A_ ) model.to(A_ ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE__ = model(**self._prepare_for_class(A_ , A_ ) ) SCREAMING_SNAKE_CASE__ = outputs.attentions self.assertEqual(len(A_ ) , 
self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) SCREAMING_SNAKE_CASE__ = len(A_ ) # Check attention is always last and order is fine SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = model_class(A_ ) model.to(A_ ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE__ = model(**self._prepare_for_class(A_ , A_ ) ) self.assertEqual(out_len + 1 , len(A_ ) ) SCREAMING_SNAKE_CASE__ = outputs.attentions self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def lowercase_ ( self ): '''simple docstring''' def check_hidden_states_output(A_ , A_ , A_ ): SCREAMING_SNAKE_CASE__ = model_class(A_ ) model.to(A_ ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE__ = model(**self._prepare_for_class(A_ , A_ ) ) SCREAMING_SNAKE_CASE__ = outputs.hidden_states SCREAMING_SNAKE_CASE__ = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(A_ ) , A_ ) SCREAMING_SNAKE_CASE__ = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ = True check_hidden_states_output(A_ , A_ , A_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE__ = True check_hidden_states_output(A_ , A_ , A_ 
) def __snake_case ( ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ = hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' ) SCREAMING_SNAKE_CASE__ = np.load(lowerCAmelCase_ ) return list(lowerCAmelCase_ ) @require_torch @require_vision class __snake_case ( unittest.TestCase ): '''simple docstring''' @cached_property def lowercase_ ( self ): '''simple docstring''' return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = TimesformerForVideoClassification.from_pretrained('''facebook/timesformer-base-finetuned-k400''' ).to( A_ ) SCREAMING_SNAKE_CASE__ = self.default_image_processor SCREAMING_SNAKE_CASE__ = prepare_video() SCREAMING_SNAKE_CASE__ = image_processor(video[:8] , return_tensors='''pt''' ).to(A_ ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE__ = model(**A_ ) # verify the logits SCREAMING_SNAKE_CASE__ = torch.Size((1, 4_00) ) self.assertEqual(outputs.logits.shape , A_ ) SCREAMING_SNAKE_CASE__ = torch.tensor([-0.3016, -0.7713, -0.4205] ).to(A_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1E-4 ) )
100
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_big_bird import BigBirdTokenizer else: __A : Optional[int] = None __A : Union[str, Any] = logging.get_logger(__name__) __A : List[Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} __A : str = { "vocab_file": { "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model", "google/bigbird-roberta-large": ( "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model" ), "google/bigbird-base-trivia-itc": ( "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model" ), }, "tokenizer_file": { "google/bigbird-roberta-base": ( "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json" ), "google/bigbird-roberta-large": ( "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json" ), "google/bigbird-base-trivia-itc": ( "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json" ), }, } __A : List[str] = { "google/bigbird-roberta-base": 4_096, "google/bigbird-roberta-large": 4_096, "google/bigbird-base-trivia-itc": 4_096, } __A : Tuple = "▁" class __lowerCAmelCase ( _UpperCamelCase): '''simple docstring''' __magic_name__ : Dict = VOCAB_FILES_NAMES __magic_name__ : Any = PRETRAINED_VOCAB_FILES_MAP __magic_name__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ : List[Any] = BigBirdTokenizer __magic_name__ : Any = ["""input_ids""", """attention_mask"""] __magic_name__ : List[int] = [] def __init__( self : str , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Union[str, Any]="<unk>" , UpperCamelCase__ : 
str="<s>" , UpperCamelCase__ : int="</s>" , UpperCamelCase__ : Optional[int]="<pad>" , UpperCamelCase__ : Optional[Any]="[SEP]" , UpperCamelCase__ : List[Any]="[MASK]" , UpperCamelCase__ : str="[CLS]" , **UpperCamelCase__ : List[Any] , ): A__ : Optional[int] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else bos_token A__ : Optional[Any] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else eos_token A__ : Optional[int] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else unk_token A__ : int =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else pad_token A__ : str =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else cls_token A__ : List[Any] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it A__ : str =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token super().__init__( UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , **UpperCamelCase__ , ) A__ : List[Any] =vocab_file A__ : Optional[int] =False if not self.vocab_file else True def _UpperCAmelCase ( self : str , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ): A__ : Tuple =[self.sep_token_id] A__ : str =[self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." 
) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase__ )) + [1] return [1] + ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) + [1] def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ): A__ : Tuple =[self.sep_token_id] A__ : Dict =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _UpperCAmelCase ( self : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(UpperCamelCase__ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return A__ : List[str] =os.path.join( UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ): copyfile(self.vocab_file , UpperCamelCase__ ) return (out_vocab_file,)
656
0
import warnings
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class FlavaProcessor(ProcessorMixin):
    """
    Constructs a FLAVA processor which wraps a FLAVA image processor and a BERT tokenizer
    into a single processor, so text and images can be prepared with one call.
    """

    # ProcessorMixin wires these class attributes to `self.image_processor` / `self.tokenizer`.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            # Back-compat shim: accept the legacy `feature_extractor` kwarg with a deprecation warning.
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        """
        Prepare text and/or images for the model. Text kwargs are forwarded to the tokenizer,
        image kwargs to the image processor; when both inputs are given the two encodings are merged.
        """
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images,
                return_image_mask=return_image_mask,
                return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors,
                **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of tokenizer and image-processor input names, order-preserving and de-duplicated.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
101
"""simple docstring""" import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging __A : Optional[int] = logging.get_logger(__name__) __A : Optional[int] = {"vocab_file": "spiece.model"} __A : List[Any] = { "vocab_file": { "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model", } } class __lowerCAmelCase ( _UpperCamelCase): '''simple docstring''' def __init__( self : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any]=False , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Dict="<s>" , UpperCamelCase__ : str="</s>" , UpperCamelCase__ : Union[str, Any]="<unk>" , UpperCamelCase__ : Optional[int]="<sep>" , UpperCamelCase__ : Optional[int]="<pad>" , UpperCamelCase__ : Optional[int]="<cls>" , UpperCamelCase__ : List[str]="<mask>" , UpperCamelCase__ : Optional[Any]=["<eop>", "<eod>"] , UpperCamelCase__ : Optional[Dict[str, Any]] = None , **UpperCamelCase__ : Dict , ): A__ : List[str] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token A__ : Tuple ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=UpperCamelCase__ , remove_space=UpperCamelCase__ , keep_accents=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , ) A__ : Dict =3 A__ : int =do_lower_case A__ : str =remove_space A__ : Optional[Any] =keep_accents A__ : int =vocab_file A__ : Dict 
=spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCamelCase__ ) try: import jieba except ModuleNotFoundError as error: raise error.__class__( "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. " "See https://pypi.org/project/jieba/ for installation." ) A__ : Union[str, Any] =jieba A__ : List[str] =str.maketrans(" \n" , "\u2582\u2583" ) @property # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size def _UpperCAmelCase ( self : Union[str, Any] ): return len(self.sp_model ) def _UpperCAmelCase ( self : Optional[int] ): A__ : Any ={self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : List[str] ): A__ : Union[str, Any] =self.__dict__.copy() A__ : Tuple =None return state def __setstate__( self : Tuple , UpperCamelCase__ : int ): A__ : Union[str, Any] =d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): A__ : Optional[int] ={} A__ : Union[str, Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : Dict ): if self.remove_space: A__ : Optional[int] =" ".join(inputs.strip().split() ) else: A__ : Optional[Any] =inputs A__ : Any =outputs.replace("``" , "\"" ).replace("''" , "\"" ) if not self.keep_accents: A__ : Optional[Any] =unicodedata.normalize("NFKD" , UpperCamelCase__ ) A__ : Tuple ="".join([c for c in outputs if not unicodedata.combining(UpperCamelCase__ )] ) if self.do_lower_case: A__ : str =outputs.lower() return outputs def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : str ): A__ : Optional[int] =self.preprocess_text(UpperCamelCase__ ) A__ : Dict =self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ ) A__ : List[str] =[] for piece in pieces: if len(UpperCamelCase__ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit(): A__ : str 
=self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCamelCase__ , "" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: A__ : Union[str, Any] =cur_pieces[1:] else: A__ : List[str] =cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(UpperCamelCase__ ) else: new_pieces.append(UpperCamelCase__ ) return new_pieces def _UpperCAmelCase ( self : int , UpperCamelCase__ : str ): return self.sp_model.PieceToId(UpperCamelCase__ ) def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : List[Any] ): return self.sp_model.IdToPiece(UpperCamelCase__ ) def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : str ): A__ : Optional[int] ="".join(UpperCamelCase__ ).replace(UpperCamelCase__ , " " ).strip() return out_string def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ): A__ : List[str] =[self.sep_token_id] A__ : str =[self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ ) if token_ids_a is not None: return ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) + [1, 1] return ([0] * len(UpperCamelCase__ )) + [1, 1] def _UpperCAmelCase ( self : int , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ): A__ : List[str] =[self.sep_token_id] A__ : Optional[Any] =[2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : str , 
UpperCamelCase__ : Optional[str] = None ): if not os.path.isdir(UpperCamelCase__ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return A__ : Tuple =os.path.join( UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCamelCase__ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCamelCase__ , "wb" ) as fi: A__ : Optional[Any] =self.sp_model.serialized_model_proto() fi.write(UpperCamelCase__ ) return (out_vocab_file,) def _UpperCAmelCase ( self : str , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : int ): A__ : List[Any] =super()._decode(*UpperCamelCase__ , **UpperCamelCase__ ) A__ : Union[str, Any] =text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" ) return text
656
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __magic_name__ : List[Any] = { """configuration_lilt""": ["""LILT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LiltConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ : int = [ """LILT_PRETRAINED_MODEL_ARCHIVE_LIST""", """LiltForQuestionAnswering""", """LiltForSequenceClassification""", """LiltForTokenClassification""", """LiltModel""", """LiltPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lilt import ( LILT_PRETRAINED_MODEL_ARCHIVE_LIST, LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, LiltPreTrainedModel, ) else: import sys __magic_name__ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
102
"""simple docstring""" def lowercase ( UpperCamelCase : int , UpperCamelCase : list[int] , UpperCamelCase : int ): """simple docstring""" def count_of_possible_combinations(UpperCamelCase : int ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(UpperCamelCase ) def lowercase ( UpperCamelCase : int , UpperCamelCase : list[int] , UpperCamelCase : int ): """simple docstring""" def count_of_possible_combinations_with_dp_array( UpperCamelCase : int , UpperCamelCase : list[int] ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] A__ : str =sum( count_of_possible_combinations_with_dp_array(target - item , UpperCamelCase ) for item in array ) A__ : List[str] =answer return answer A__ : List[Any] =[-1] * (target + 1) return count_of_possible_combinations_with_dp_array(UpperCamelCase , UpperCamelCase ) def lowercase ( UpperCamelCase : int , UpperCamelCase : list[int] , UpperCamelCase : int ): """simple docstring""" A__ : str =[0] * (target + 1) A__ : Optional[Any] =1 for i in range(1 , target + 1 ): for j in range(UpperCamelCase ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() __A : Optional[Any] = 3 __A : Optional[Any] = 5 __A : int = [1, 2, 5] print(combination_sum_iv(n, array, target))
656
0
"""simple docstring""" import unittest from transformers import GPTSwaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin snake_case = get_tests_dir('''fixtures/test_sentencepiece_with_bytefallback.model''') @require_sentencepiece @require_tokenizers class UpperCAmelCase ( __SCREAMING_SNAKE_CASE,unittest.TestCase ): A__ : int = GPTSwaTokenizer A__ : Optional[Any] = False A__ : Any = True A__ : List[str] = False def __UpperCAmelCase ( self : Optional[Any] ): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing _snake_case = GPTSwaTokenizer(__lowerCamelCase , eos_token='''<unk>''' , bos_token='''<unk>''' , pad_token='''<unk>''' ) tokenizer.save_pretrained(self.tmpdirname ) def __UpperCAmelCase ( self : int , __lowerCamelCase : Any ): """simple docstring""" _snake_case = '''This is a test''' _snake_case = '''This is a test''' return input_text, output_text def __UpperCAmelCase ( self : Optional[int] ): """simple docstring""" _snake_case = '''<s>''' _snake_case = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase ) def __UpperCAmelCase ( self : Any ): """simple docstring""" _snake_case = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<unk>''' ) self.assertEqual(vocab_keys[1] , '''<s>''' ) self.assertEqual(vocab_keys[-1] , '''j''' ) self.assertEqual(len(__lowerCamelCase ) , 2_0_0_0 ) def __UpperCAmelCase ( self : List[Any] ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 2_0_0_0 ) def __UpperCAmelCase ( self : Dict ): """simple docstring""" _snake_case = GPTSwaTokenizer(__lowerCamelCase ) _snake_case = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(__lowerCamelCase , ['''▁This''', '''▁is''', 
'''▁a''', '''▁t''', '''est'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2] ) _snake_case = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) # fmt: off self.assertListEqual( __lowerCamelCase , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] , ) # fmt: on _snake_case = tokenizer.convert_tokens_to_ids(__lowerCamelCase ) self.assertListEqual( __lowerCamelCase , [2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0] , ) _snake_case = tokenizer.convert_ids_to_tokens(__lowerCamelCase ) # fmt: off self.assertListEqual( __lowerCamelCase , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] ) # fmt: on def __UpperCAmelCase ( self : Union[str, Any] ): """simple docstring""" _snake_case = GPTSwaTokenizer(__lowerCamelCase ) _snake_case = ['''This is a test''', '''I was born in 92000, and this is falsé.'''] _snake_case = [ [4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2], [2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0], ] # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids for text, expected_ids in zip(__lowerCamelCase , __lowerCamelCase ): self.assertListEqual(tokenizer.encode_fast(__lowerCamelCase ) , __lowerCamelCase ) # Test that decode_fast returns the input text for text, token_ids in zip(__lowerCamelCase , __lowerCamelCase ): self.assertEqual(tokenizer.decode_fast(__lowerCamelCase ) , __lowerCamelCase ) @slow def __UpperCAmelCase ( self : 
Tuple ): """simple docstring""" _snake_case = [ '''<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')''', '''Hey there, how are you doing this fine day?''', '''This is a text with a trailing spaces followed by a dot .''', '''Häj sväjs lillebrör! =)''', '''Det är inget fel på Mr. Cool''', ] # fmt: off _snake_case = {'''input_ids''': [[6_3_4_2_3, 5, 6_8_1_1, 1_4_9_5_4, 2_8_2, 8_1_6, 3_8_2_1, 6_3_4_6_6, 6_3_4_2_5, 6_3_4_6_2, 1_8, 6_3_9_7_8, 6_7_8, 3_0_1, 1_3_2_0, 6_3_4_2_3, 6_3_4_5_5, 6_3_4_5_8, 1_8, 6_3_9_8_2, 4_2_4_6, 3_9_4_0, 1_9_0_1, 4_7_7_8_9, 5_5_4_7, 1_8_9_9_4], [1_9_6_3_0, 1_1_0_0, 6_3_4_4_6, 1_3_4_2, 6_3_3, 5_4_4, 4_4_8_8, 5_9_3, 5_1_0_2, 2_4_1_6, 6_3_4_9_5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_6_5_2, 4_2_8, 2_6_8, 1_9_3_6, 5_1_5, 2_6_8, 5_8_5_9_3, 2_2_4_1_3, 9_1_0_6, 5_4_6, 2_6_8, 3_3_2_1_3, 6_3_9_7_9, 6_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5_1_3_0, 6_3_4_5_0, 9_2_4, 6_3_4_4_9, 2_2_4_9, 4_0_6_2, 1_5_5_8, 3_1_8, 6_3_5_0_4, 2_1_4_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_0_9, 3_7_7, 2_8_2_7, 2_5_5_9, 3_3_2, 6_5_7_5, 6_3_4_4_3, 2_6_8_0_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0]]} # fmt: on self.tokenizer_integration_test_util( expected_encoding=__lowerCamelCase , model_name='''AI-Sweden/gpt-sw3-126m''' , sequences=__lowerCamelCase , )
103
"""simple docstring""" import math import tensorflow as tf from packaging import version def lowercase ( UpperCamelCase : Optional[Any] ): """simple docstring""" A__ : List[Any] =tf.convert_to_tensor(UpperCamelCase ) A__ : List[Any] =0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) )) return x * cdf def lowercase ( UpperCamelCase : Optional[int] ): """simple docstring""" A__ : Optional[Any] =tf.convert_to_tensor(UpperCamelCase ) A__ : Tuple =tf.cast(math.pi , x.dtype ) A__ : Dict =tf.cast(0.04_47_15 , x.dtype ) A__ : Optional[int] =0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(UpperCamelCase , 3 )) )) return x * cdf def lowercase ( UpperCamelCase : Optional[int] ): """simple docstring""" A__ : List[str] =tf.convert_to_tensor(UpperCamelCase ) return x * tf.tanh(tf.math.softplus(UpperCamelCase ) ) def lowercase ( UpperCamelCase : List[str] ): """simple docstring""" A__ : Union[str, Any] =tf.convert_to_tensor(UpperCamelCase ) A__ : List[Any] =tf.cast(0.04_47_15 , x.dtype ) A__ : List[Any] =tf.cast(0.79_78_84_56_08 , x.dtype ) return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) )) def lowercase ( UpperCamelCase : List[Any] ): """simple docstring""" A__ : List[str] =tf.convert_to_tensor(UpperCamelCase ) A__ : str =tf.cast(1.7_02 , x.dtype ) return x * tf.math.sigmoid(coeff * x ) def lowercase ( UpperCamelCase : Tuple ): """simple docstring""" return tf.clip_by_value(_gelu(UpperCamelCase ) , -10 , 10 ) def lowercase ( UpperCamelCase : str , UpperCamelCase : Any=-1 ): """simple docstring""" A__ , A__ : Optional[Any] =tf.split(UpperCamelCase , 2 , axis=UpperCamelCase ) return a * tf.math.sigmoid(UpperCamelCase ) if version.parse(tf.version.VERSION) >= version.parse("2.4"): def lowercase ( UpperCamelCase : int ): """simple docstring""" return tf.keras.activations.gelu(UpperCamelCase , approximate=UpperCamelCase ) __A : Optional[Any] = tf.keras.activations.gelu __A : Optional[Any] = approximate_gelu_wrap else: __A : Any = _gelu 
__A : Union[str, Any] = _gelu_new __A : List[str] = { "gelu": gelu, "gelu_10": gelu_aa, "gelu_fast": gelu_fast, "gelu_new": gelu_new, "glu": glu, "mish": mish, "quick_gelu": quick_gelu, "relu": tf.keras.activations.relu, "sigmoid": tf.keras.activations.sigmoid, "silu": tf.keras.activations.swish, "swish": tf.keras.activations.swish, "tanh": tf.keras.activations.tanh, } def lowercase ( UpperCamelCase : List[Any] ): """simple docstring""" if activation_string in ACTaFN: return ACTaFN[activation_string] else: raise KeyError(F'''function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}''' )
656
0
"""EfficientNet model configuration.

Reconstructed from a mangled source: the original text repeated a single
obfuscated name for every ``__init__`` parameter (a duplicate-argument
SyntaxError), assigned all config values to a throwaway local instead of
``self``, and gave both classes the same name (the second shadowed the
first). Parameter names, attribute names, and class names are restored.
"""
from collections import OrderedDict
from typing import List, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    r"""
    Configuration class storing the hyper-parameters of an EfficientNet model.

    Instantiating a configuration with the defaults yields a configuration in
    the style of ``google/efficientnet-b7``.

    Args:
        num_channels (`int`, defaults to 3): Number of input image channels.
        image_size (`int`, defaults to 600): Input image resolution.
        width_coefficient (`float`, defaults to 2.0): Scaling coefficient for
            network width at each stage.
        depth_coefficient (`float`, defaults to 3.1): Scaling coefficient for
            network depth at each stage.
        depth_divisor (`int`, defaults to 8): Unit of network width.
        kernel_sizes (`List[int]`): Kernel size per block.
        in_channels (`List[int]`): Input channel count per block.
        out_channels (`List[int]`): Output channel count per block.
        depthwise_padding (`List[int]`): Block indices using square padding.
        strides (`List[int]`): Stride per block.
        num_block_repeats (`List[int]`): Number of repeats per block.
        expand_ratios (`List[int]`): Expansion ratio per block.
        squeeze_expansion_ratio (`float`, defaults to 0.25): Squeeze-excite ratio.
        hidden_act (`str`, defaults to `"swish"`): Non-linearity in the encoder.
        hidden_dim (`int`, defaults to 2560): Hidden dimension before the
            classification head.
        pooling_type (`str`, defaults to `"mean"`): `"mean"` or `"max"` pooling
            before the head.
        initializer_range (`float`, defaults to 0.02): Std of the weight
            initializer.
        batch_norm_eps (`float`, defaults to 0.001): Batch-norm epsilon.
        batch_norm_momentum (`float`, defaults to 0.99): Batch-norm momentum.
        dropout_rate (`float`, defaults to 0.5): Dropout before the head.
        drop_connect_rate (`float`, defaults to 0.2): Drop rate for skip
            connections.
    """

    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        # Each block repeat contributes 4 hidden layers (expansion, depthwise,
        # squeeze-excite, projection) — mirrors the mangled `sum(...) * 4`.
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    """ONNX export configuration for EfficientNet."""

    # Minimum torch version supporting the ops used by this model's export.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes for the single image input.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance when validating the exported model's outputs.
        return 1e-5
104
"""Tests for the PyTorch SegFormer model.

Reconstructed from a mangled source in which every method signature repeated
one obfuscated parameter name (a duplicate-argument SyntaxError) and all
assignments targeted a throwaway local. Identifiers are restored to the
conventional transformers test names; logic follows the visible statements.
"""
import inspect
import unittest

from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_MAPPING,
        SegformerForImageClassification,
        SegformerForSemanticSegmentation,
        SegformerModel,
    )
    from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import SegformerImageProcessor


class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        # SegFormer exposes per-block lists rather than scalar sizes, so check
        # for those instead of the generic `hidden_size` attribute.
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))


class SegformerModelTester:
    """Builds a tiny SegFormer config and random inputs for the common tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            # Per-pixel segmentation labels.
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # Final feature map is downsampled by the last rate times 2.
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        # Single-label (binary) segmentation uses BCE loss internally.
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            # One attention map per transformer layer across all blocks.
            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also works using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            # One hidden state per encoder block.
            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also works using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # Base models (MODEL_MAPPING) have no loss head to train against.
            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the COCO cats fixture image used by the integration tests."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        # Looser tolerance for the cityscapes checkpoint.
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        # With explicit target sizes the maps are resized accordingly.
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        # Without target sizes the raw logit resolution is kept.
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
656
0
"""Tests for the Flax VisionTextDualEncoder model.

Reconstructed from a mangled source (duplicate obfuscated parameter names made
every method signature a SyntaxError, and all assignments targeted a throwaway
local). Identifiers restored to the conventional transformers test names.
"""
import collections
import tempfile
import unittest

import numpy as np

from transformers.testing_utils import (
    is_pt_flax_cross_test,
    require_flax,
    require_torch,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available

from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester


if is_flax_available():
    from transformers import (
        FlaxBertModel,
        FlaxCLIPVisionModel,
        FlaxVisionTextDualEncoderModel,
        FlaxViTModel,
        VisionTextDualEncoderConfig,
        VisionTextDualEncoderProcessor,
    )
    from transformers.modeling_flax_pytorch_utils import (
        convert_pytorch_state_dict_to_flax,
        load_flax_weights_in_pytorch_model,
    )

if is_torch_available():
    import torch

    from transformers import VisionTextDualEncoderModel

if is_vision_available():
    from PIL import Image


def to_2tuple(x):
    """Return *x* unchanged when iterable, else duplicate it into a pair."""
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)


@require_flax
class VisionTextDualEncoderMixin:
    # Hooks implemented by the concrete subclasses below.
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = FlaxVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def check_pt_flax_equivalence(self, fx_model, pt_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()

        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}

        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()

        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)

        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)

        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)

        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()

        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)

    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state

        self.check_pt_flax_equivalence(fx_model, pt_model, inputs_dict)

    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

        self.check_pt_flax_equivalence(fx_model, pt_model, inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")

        inputs_dict = config_inputs_dict

        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)


@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }


@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }


@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
105
"""simple docstring""" import unittest import numpy as np from transformers import RobertaPreLayerNormConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, ) class __lowerCAmelCase ( unittest.TestCase): '''simple docstring''' def __init__( self : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any]=13 , UpperCamelCase__ : Optional[int]=7 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : List[str]=99 , UpperCamelCase__ : Optional[Any]=32 , UpperCamelCase__ : Any=5 , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : Union[str, Any]=37 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Optional[Any]=512 , UpperCamelCase__ : Union[str, Any]=16 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : List[str]=0.02 , UpperCamelCase__ : List[Any]=4 , ): A__ : str =parent A__ : List[str] =batch_size A__ : Any =seq_length A__ : List[str] =is_training A__ : List[Any] =use_attention_mask A__ : List[Any] =use_token_type_ids A__ : Dict =use_labels A__ : List[Any] =vocab_size A__ : Optional[int] =hidden_size A__ : Optional[Any] =num_hidden_layers A__ : str =num_attention_heads A__ : int =intermediate_size A__ : Tuple =hidden_act A__ : Tuple =hidden_dropout_prob A__ : Dict =attention_probs_dropout_prob A__ : Any 
=max_position_embeddings A__ : Any =type_vocab_size A__ : Union[str, Any] =type_sequence_label_size A__ : Optional[Any] =initializer_range A__ : int =num_choices def _UpperCAmelCase ( self : Tuple ): A__ : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ : List[str] =None if self.use_attention_mask: A__ : Optional[int] =random_attention_mask([self.batch_size, self.seq_length] ) A__ : str =None if self.use_token_type_ids: A__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A__ : Any =RobertaPreLayerNormConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _UpperCAmelCase ( self : Tuple ): A__ : Dict =self.prepare_config_and_inputs() A__ , A__ , A__ , A__ : str =config_and_inputs A__ : Optional[Any] ={"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict def _UpperCAmelCase ( self : int ): A__ : str =self.prepare_config_and_inputs() A__ , A__ , A__ , A__ : Union[str, Any] =config_and_inputs A__ : Union[str, Any] =True A__ : List[Any] =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) A__ : Tuple =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax # Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with 
ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40 class __lowerCAmelCase ( _UpperCamelCase , unittest.TestCase): '''simple docstring''' __magic_name__ : Union[str, Any] = True __magic_name__ : Dict = ( ( FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, ) if is_flax_available() else () ) def _UpperCAmelCase ( self : Optional[int] ): A__ : Optional[int] =FlaxRobertaPreLayerNormModelTester(self ) @slow def _UpperCAmelCase ( self : List[Any] ): for model_class_name in self.all_model_classes: A__ : Tuple =model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=UpperCamelCase__ ) A__ : Union[str, Any] =model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCamelCase__ ) @require_flax class __lowerCAmelCase ( unittest.TestCase): '''simple docstring''' @slow def _UpperCAmelCase ( self : Tuple ): A__ : Any =FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=UpperCamelCase__ ) A__ : Tuple =np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] , dtype=jnp.intaa ) A__ : str =model(UpperCamelCase__ )[0] A__ : List[Any] =[1, 11, 50265] self.assertEqual(list(output.shape ) , UpperCamelCase__ ) # compare the actual values for a slice. 
A__ : Any =np.array( [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) ) @slow def _UpperCAmelCase ( self : List[Any] ): A__ : Union[str, Any] =FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=UpperCamelCase__ ) A__ : List[Any] =np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] , dtype=jnp.intaa ) A__ : Dict =model(UpperCamelCase__ )[0] # compare the actual values for a slice. A__ : Optional[Any] =np.array( [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )
656
0
"""Tokenization classes for RoFormer (fast, backed by HuggingFace *tokenizers*)."""
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer


logger = logging.get_logger(__name__)

# NOTE(review): the chunk bound every one of these constants to the same name,
# losing all but the last; restored to the names the class attributes reference.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_small": (
            "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_chinese_char_base": (
            "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_discriminator": (
            "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_generator": (
            "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}

PRETRAINED_INIT_CONFIGURATION = {
    "junnyu/roformer_chinese_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_base": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_base": {"do_lower_case": True},
    "junnyu/roformer_small_discriminator": {"do_lower_case": True},
    "junnyu/roformer_small_generator": {"do_lower_case": True},
}


class lowerCAmelCase__(PreTrainedTokenizerFast):
    """Fast RoFormer tokenizer.

    WordPiece-based like BERT's fast tokenizer, with a Jieba pre-tokenizer
    installed so Chinese text is split into words before WordPiece runs.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if the serialized tokenizer.json
        # disagrees with the lowercase / strip_accents behaviour requested here.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        # The custom Jieba pre-tokenizer is not picklable; swap in a plain
        # BertPreTokenizer for serialization.
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        # Re-install the Jieba pre-tokenizer that __getstate__ stripped.
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs as ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return token-type ids: 0 for the first sequence (specials included), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend vocabulary files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(
        self,
        save_directory,
        legacy_format=None,
        filename_prefix=None,
        push_to_hub=False,
        **kwargs,
    ):
        # Same pickling caveat as __getstate__: the Jieba pre-tokenizer cannot
        # be serialized, so replace it before delegating to the base saver.
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
106
"""simple docstring""" import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() __A : List[Any] = logging.get_logger(__name__) __A : Any = [ ("bert.bert", "visual_bert"), ("bert.cls", "cls"), ("bert.classifier", "cls"), ("token_type_embeddings_visual", "visual_token_type_embeddings"), ("position_embeddings_visual", "visual_position_embeddings"), ("projection", "visual_projection"), ] __A : Optional[int] = [ "nlvr2_coco_pre_trained.th", "nlvr2_fine_tuned.th", "nlvr2_pre_trained.th", "vcr_coco_pre_train.th", "vcr_fine_tune.th", "vcr_pre_train.th", "vqa_coco_pre_trained.th", "vqa_fine_tuned.th", "vqa_pre_trained.th", ] def lowercase ( UpperCamelCase : Tuple ): """simple docstring""" A__ : Union[str, Any] =torch.load(UpperCamelCase , map_location="cpu" ) return sd def lowercase ( UpperCamelCase : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : int=rename_keys_prefix ): """simple docstring""" A__ : List[str] =OrderedDict() A__ : str =torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue A__ : Optional[Any] =key for name_pair in rename_keys_prefix: A__ : int =new_key.replace(name_pair[0] , name_pair[1] ) A__ : Dict =d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately A__ : Optional[int] =new_d["cls.predictions.bias"] return new_d @torch.no_grad() def lowercase ( UpperCamelCase : Dict , UpperCamelCase : List[str] ): """simple docstring""" assert ( checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS ), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.''' # Get Config if "pre" in 
checkpoint_path: A__ : Any ="pretraining" if "vcr" in checkpoint_path: A__ : Union[str, Any] ={"visual_embedding_dim": 512} elif "vqa_advanced" in checkpoint_path: A__ : Optional[Any] ={"visual_embedding_dim": 2048} elif "vqa" in checkpoint_path: A__ : Optional[int] ={"visual_embedding_dim": 2048} elif "nlvr" in checkpoint_path: A__ : List[str] ={"visual_embedding_dim": 1024} else: raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''' ) else: if "vcr" in checkpoint_path: A__ : Optional[int] ={"visual_embedding_dim": 512} A__ : List[str] ="multichoice" elif "vqa_advanced" in checkpoint_path: A__ : Any ={"visual_embedding_dim": 2048} A__ : str ="vqa_advanced" elif "vqa" in checkpoint_path: A__ : Optional[int] ={"visual_embedding_dim": 2048, "num_labels": 3129} A__ : str ="vqa" elif "nlvr" in checkpoint_path: A__ : str ={ "visual_embedding_dim": 1024, "num_labels": 2, } A__ : Dict ="nlvr" A__ : Union[str, Any] =VisualBertConfig(**UpperCamelCase ) # Load State Dict A__ : int =load_state_dict(UpperCamelCase ) A__ : Tuple =get_new_dict(UpperCamelCase , UpperCamelCase ) if model_type == "pretraining": A__ : str =VisualBertForPreTraining(UpperCamelCase ) elif model_type == "vqa": A__ : Optional[int] =VisualBertForQuestionAnswering(UpperCamelCase ) elif model_type == "nlvr": A__ : Union[str, Any] =VisualBertForVisualReasoning(UpperCamelCase ) elif model_type == "multichoice": A__ : Union[str, Any] =VisualBertForMultipleChoice(UpperCamelCase ) model.load_state_dict(UpperCamelCase ) # Save Checkpoints Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase ) model.save_pretrained(UpperCamelCase ) if __name__ == "__main__": __A : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.") parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.") __A : str = parser.parse_args() 
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
656
0
"""Character Error Rate (CER) metric for `datasets`, computed with jiwer."""
from typing import List

import jiwer
import jiwer.transforms as tr
from packaging import version

import datasets
from datasets.config import PY_VERSION


if PY_VERSION < version.parse("3.8"):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


# Sentences are joined with no delimiter before the character-level comparison.
SENTENCE_DELIMITER = ""


if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):

    class SentencesToListOfCharacters(tr.AbstractTransform):
        """Explode sentences into characters (older jiwer lacks ReduceToListOfListOfChars)."""

        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                # Insert the delimiter between (not after) sentences when one is set.
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )


_CITATION = """\
@inproceedings{inproceedings,
    author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
    year = {2004},
    month = {01},
    pages = {},
    title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""

_DESCRIPTION = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.

CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.

Character error rate can be computed as:

CER = (S + D + I) / N = (S + D + I) / (S + D + C)

where

S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).

CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
"""

_KWARGS_DESCRIPTION = """
Computes CER score of transcribed segments against references.
Args:
    references: list of references for each speech input.
    predictions: list of transcribtions to score.
    concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
    (float): the character error rate

Examples:

    >>> predictions = ["this is the prediction", "there is an other sample"]
    >>> references = ["this is the reference", "there is another one"]
    >>> cer = datasets.load_metric("cer")
    >>> cer_score = cer.compute(predictions=predictions, references=references)
    >>> print(cer_score)
    0.34146341463414637
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            # One global alignment over all texts; "wer" is the error-rate key
            # jiwer uses even though we feed characters, so it is the CER here.
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            # jiwer expects (truth, hypothesis).
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
107
"""simple docstring""" __A : Union[str, Any] = {str(digit): digit**5 for digit in range(10)} def lowercase ( UpperCamelCase : int ): """simple docstring""" return sum(DIGITS_FIFTH_POWER[digit] for digit in str(UpperCamelCase ) ) def lowercase ( ): """simple docstring""" return sum( number for number in range(1000 , 1000000 ) if number == digits_fifth_powers_sum(UpperCamelCase ) ) if __name__ == "__main__": print(solution())
656
0
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


class SCREAMING_SNAKE_CASE__(BaseImageProcessor):
    """Image processor: optional bicubic resize (default 256x256), center crop
    (default 224x224), rescale (default 1/255) and normalization with the
    ImageNet-standard mean/std.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PIL.Image.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize ``image`` to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        # `resize` here resolves to the module-level function, not this method.
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop ``image`` to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize ``image`` with the given per-channel ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Preprocess one image or a batch. Per-call arguments override the
        processor defaults; unset ``do_*`` flags fall back to the instance settings.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # BUGFIX: was `do_resize and size is None or resample is None`, which
        # (by precedence) raised whenever resample was None even with
        # do_resize=False; parenthesized to match the error message's intent.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
108
"""simple docstring""" import collections.abc from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_poolformer import PoolFormerConfig __A : Optional[Any] = logging.get_logger(__name__) # General docstring __A : str = "PoolFormerConfig" # Base docstring __A : Optional[Any] = "sail/poolformer_s12" __A : List[Any] = [1, 512, 7, 7] # Image classification docstring __A : List[str] = "sail/poolformer_s12" __A : Tuple = "tabby, tabby cat" __A : Tuple = [ "sail/poolformer_s12", # See all PoolFormer models at https://huggingface.co/models?filter=poolformer ] def lowercase ( UpperCamelCase : Any , UpperCamelCase : float = 0.0 , UpperCamelCase : bool = False ): """simple docstring""" if drop_prob == 0.0 or not training: return input A__ : Tuple =1 - drop_prob A__ : List[str] =(input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets A__ : Any =keep_prob + torch.rand(UpperCamelCase , dtype=input.dtype , device=input.device ) random_tensor.floor_() # binarize A__ : Optional[int] =input.div(UpperCamelCase ) * random_tensor return output class __lowerCAmelCase ( nn.Module): '''simple docstring''' def __init__( self : Optional[int] , UpperCamelCase__ : Optional[float] = None ): super().__init__() A__ : Optional[int] =drop_prob def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : torch.Tensor ): return drop_path(UpperCamelCase__ , self.drop_prob , self.training ) def _UpperCAmelCase ( self : List[str] ): return "p={}".format(self.drop_prob ) class __lowerCAmelCase ( nn.Module): '''simple docstring''' def 
__init__( self : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int=None ): super().__init__() A__ : Optional[int] =patch_size if isinstance(UpperCamelCase__ , collections.abc.Iterable ) else (patch_size, patch_size) A__ : Optional[int] =stride if isinstance(UpperCamelCase__ , collections.abc.Iterable ) else (stride, stride) A__ : int =padding if isinstance(UpperCamelCase__ , collections.abc.Iterable ) else (padding, padding) A__ : Any =nn.Convad(UpperCamelCase__ , UpperCamelCase__ , kernel_size=UpperCamelCase__ , stride=UpperCamelCase__ , padding=UpperCamelCase__ ) A__ : Any =norm_layer(UpperCamelCase__ ) if norm_layer else nn.Identity() def _UpperCAmelCase ( self : Tuple , UpperCamelCase__ : str ): A__ : List[str] =self.projection(UpperCamelCase__ ) A__ : Any =self.norm(UpperCamelCase__ ) return embeddings class __lowerCAmelCase ( nn.GroupNorm): '''simple docstring''' def __init__( self : Tuple , UpperCamelCase__ : Dict , **UpperCamelCase__ : Union[str, Any] ): super().__init__(1 , UpperCamelCase__ , **UpperCamelCase__ ) class __lowerCAmelCase ( nn.Module): '''simple docstring''' def __init__( self : Tuple , UpperCamelCase__ : Optional[int] ): super().__init__() A__ : Any =nn.AvgPoolad(UpperCamelCase__ , stride=1 , padding=pool_size // 2 , count_include_pad=UpperCamelCase__ ) def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : List[str] ): return self.pool(UpperCamelCase__ ) - hidden_states class __lowerCAmelCase ( nn.Module): '''simple docstring''' def __init__( self : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] ): super().__init__() A__ : List[Any] =nn.Convad(UpperCamelCase__ , UpperCamelCase__ , 1 ) A__ : Union[str, Any] =nn.Convad(UpperCamelCase__ , UpperCamelCase__ , 1 ) A__ : Dict =PoolFormerDropPath(UpperCamelCase__ ) if 
isinstance(config.hidden_act , UpperCamelCase__ ): A__ : Tuple =ACTaFN[config.hidden_act] else: A__ : Optional[Any] =config.hidden_act def _UpperCAmelCase ( self : Tuple , UpperCamelCase__ : Dict ): A__ : Optional[Any] =self.conva(UpperCamelCase__ ) A__ : List[str] =self.act_fn(UpperCamelCase__ ) A__ : List[str] =self.drop(UpperCamelCase__ ) A__ : Optional[int] =self.conva(UpperCamelCase__ ) A__ : Optional[Any] =self.drop(UpperCamelCase__ ) return hidden_states class __lowerCAmelCase ( nn.Module): '''simple docstring''' def __init__( self : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Any ): super().__init__() A__ : Optional[int] =PoolFormerPooling(UpperCamelCase__ ) A__ : List[str] =PoolFormerOutput(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) A__ : int =PoolFormerGroupNorm(UpperCamelCase__ ) A__ : int =PoolFormerGroupNorm(UpperCamelCase__ ) # Useful for training neural nets A__ : Tuple =PoolFormerDropPath(UpperCamelCase__ ) if drop_path > 0.0 else nn.Identity() A__ : Optional[Any] =config.use_layer_scale if config.use_layer_scale: A__ : List[str] =nn.Parameter( config.layer_scale_init_value * torch.ones((UpperCamelCase__) ) , requires_grad=UpperCamelCase__ ) A__ : List[Any] =nn.Parameter( config.layer_scale_init_value * torch.ones((UpperCamelCase__) ) , requires_grad=UpperCamelCase__ ) def _UpperCAmelCase ( self : Any , UpperCamelCase__ : Optional[int] ): if self.use_layer_scale: A__ : Optional[int] =self.pooling(self.before_norm(UpperCamelCase__ ) ) A__ : Union[str, Any] =self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output # First residual connection A__ : Union[str, Any] =hidden_states + self.drop_path(UpperCamelCase__ ) A__ : Tuple =() A__ : List[str] =self.output(self.after_norm(UpperCamelCase__ ) ) A__ : Optional[Any] =self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * 
layer_output # Second residual connection A__ : str =hidden_states + self.drop_path(UpperCamelCase__ ) A__ : List[Any] =(output,) + outputs return outputs else: A__ : Tuple =self.drop_path(self.pooling(self.before_norm(UpperCamelCase__ ) ) ) # First residual connection A__ : Optional[Any] =pooling_output + hidden_states A__ : Tuple =() # Second residual connection inside the PoolFormerOutput block A__ : List[str] =self.drop_path(self.output(self.after_norm(UpperCamelCase__ ) ) ) A__ : Any =hidden_states + layer_output A__ : Tuple =(output,) + outputs return outputs class __lowerCAmelCase ( nn.Module): '''simple docstring''' def __init__( self : Dict , UpperCamelCase__ : List[str] ): super().__init__() A__ : Tuple =config # stochastic depth decay rule A__ : Dict =[x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )] # patch embeddings A__ : Tuple =[] for i in range(config.num_encoder_blocks ): embeddings.append( PoolFormerEmbeddings( patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) ) A__ : List[str] =nn.ModuleList(UpperCamelCase__ ) # Transformer blocks A__ : Union[str, Any] =[] A__ : Any =0 for i in range(config.num_encoder_blocks ): # each block consists of layers A__ : Union[str, Any] =[] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i] ): layers.append( PoolFormerLayer( UpperCamelCase__ , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) ) blocks.append(nn.ModuleList(UpperCamelCase__ ) ) A__ : str =nn.ModuleList(UpperCamelCase__ ) def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : Optional[int]=True ): A__ : Union[str, Any] =() if 
output_hidden_states else None A__ : Dict =pixel_values for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ): A__ , A__ : List[Any] =layers # Get patch embeddings from hidden_states A__ : Any =embedding_layer(UpperCamelCase__ ) # Send the embeddings through the blocks for _, blk in enumerate(UpperCamelCase__ ): A__ : List[str] =blk(UpperCamelCase__ ) A__ : Tuple =layer_outputs[0] if output_hidden_states: A__ : List[Any] =all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=UpperCamelCase__ , hidden_states=UpperCamelCase__ ) class __lowerCAmelCase ( _UpperCamelCase): '''simple docstring''' __magic_name__ : List[str] = PoolFormerConfig __magic_name__ : int = """poolformer""" __magic_name__ : Any = """pixel_values""" __magic_name__ : Any = True def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : str ): if isinstance(UpperCamelCase__ , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(UpperCamelCase__ , nn.LayerNorm ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) def _UpperCAmelCase ( self : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any]=False ): if isinstance(UpperCamelCase__ , UpperCamelCase__ ): A__ : Optional[Any] =value __A : Optional[int] = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. 
Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n" __A : Dict = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n" @add_start_docstrings( """The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.""" , _UpperCamelCase , ) class __lowerCAmelCase ( _UpperCamelCase): '''simple docstring''' def __init__( self : List[str] , UpperCamelCase__ : Dict ): super().__init__(UpperCamelCase__ ) A__ : List[Any] =config A__ : Optional[Any] =PoolFormerEncoder(UpperCamelCase__ ) # Initialize weights and apply final processing self.post_init() def _UpperCAmelCase ( self : Tuple ): return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(UpperCamelCase__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCamelCase__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def _UpperCAmelCase ( self : str , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[bool] = None , ): A__ : int =( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) A__ : Optional[int] =return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values" ) A__ : List[Any] =self.encoder( UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , return_dict=UpperCamelCase__ , ) A__ : int =encoder_outputs[0] if not return_dict: return (sequence_output, None) + encoder_outputs[1:] return BaseModelOutputWithNoAttention( last_hidden_state=UpperCamelCase__ , hidden_states=encoder_outputs.hidden_states , ) class __lowerCAmelCase ( nn.Module): '''simple docstring''' def 
__init__( self : Dict , UpperCamelCase__ : Optional[Any] ): super().__init__() A__ : List[str] =nn.Linear(config.hidden_size , config.hidden_size ) def _UpperCAmelCase ( self : Optional[Any] , UpperCamelCase__ : List[Any] ): A__ : int =self.dense(UpperCamelCase__ ) return output @add_start_docstrings( """ PoolFormer Model transformer with an image classification head on top """ , _UpperCamelCase , ) class __lowerCAmelCase ( _UpperCamelCase): '''simple docstring''' def __init__( self : Optional[Any] , UpperCamelCase__ : str ): super().__init__(UpperCamelCase__ ) A__ : List[str] =config.num_labels A__ : Optional[int] =PoolFormerModel(UpperCamelCase__ ) # Final norm A__ : Dict =PoolFormerGroupNorm(config.hidden_sizes[-1] ) # Classifier head A__ : Dict =( nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UpperCamelCase__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCamelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.LongTensor] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[bool] = None , ): A__ : Tuple =return_dict if return_dict is not None else self.config.use_return_dict A__ : List[str] =self.poolformer( UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , return_dict=UpperCamelCase__ , ) A__ : str =outputs[0] A__ : List[Any] =self.classifier(self.norm(UpperCamelCase__ ).mean([-2, -1] ) ) A__ : Optional[Any] =None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: A__ : int ="regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): A__ : Tuple ="single_label_classification" else: A__ : 
Optional[int] ="multi_label_classification" if self.config.problem_type == "regression": A__ : Dict =MSELoss() if self.num_labels == 1: A__ : Optional[Any] =loss_fct(logits.squeeze() , labels.squeeze() ) else: A__ : List[str] =loss_fct(UpperCamelCase__ , UpperCamelCase__ ) elif self.config.problem_type == "single_label_classification": A__ : Tuple =CrossEntropyLoss() A__ : int =loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": A__ : List[Any] =BCEWithLogitsLoss() A__ : str =loss_fct(UpperCamelCase__ , UpperCamelCase__ ) if not return_dict: A__ : Optional[int] =(logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=UpperCamelCase__ , logits=UpperCamelCase__ , hidden_states=outputs.hidden_states )
656
0
'''simple docstring''' import re def __magic_name__ ( __UpperCAmelCase ) -> bool: '''simple docstring''' __SCREAMING_SNAKE_CASE = re.compile( R"""^(?:0|94|\+94|0{2}94)""" R"""7(0|1|2|4|5|6|7|8)""" R"""(-| |)""" R"""\d{7}$""" ) return bool(re.search(__UpperCAmelCase , __UpperCAmelCase ) ) if __name__ == "__main__": a = "0094702343221" print(is_sri_lankan_phone_number(phone))
109
"""simple docstring""" import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase): '''simple docstring''' __magic_name__ : int = IFInpaintingSuperResolutionPipeline __magic_name__ : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""} __magic_name__ : str = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""}) __magic_name__ : Optional[Any] = PipelineTesterMixin.required_optional_params - {"""latents"""} def _UpperCAmelCase ( self : Union[str, Any] ): return self._get_superresolution_dummy_components() def _UpperCAmelCase ( self : Optional[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int]=0 ): if str(UpperCamelCase__ ).startswith("mps" ): A__ : Any =torch.manual_seed(UpperCamelCase__ ) else: A__ : Dict =torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ ) A__ : Tuple =floats_tensor((1, 3, 16, 16) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) A__ : Optional[int] =floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) A__ : Any =floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) A__ : List[str] ={ "prompt": "A painting of a squirrel eating a burger", "image": image, "original_image": original_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , 
reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def _UpperCAmelCase ( self : Dict ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def _UpperCAmelCase ( self : int ): self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" ) def _UpperCAmelCase ( self : Tuple ): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def _UpperCAmelCase ( self : str ): self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def _UpperCAmelCase ( self : Dict ): self._test_save_load_local() def _UpperCAmelCase ( self : Optional[int] ): self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
656
0
"""simple docstring""" import tempfile import unittest from make_student import create_student_by_copying_alternating_layers from transformers import AutoConfig from transformers.file_utils import cached_property from transformers.testing_utils import require_torch A : Optional[int] = "sshleifer/bart-tiny-random" A : str = "patrickvonplaten/t5-tiny-random" @require_torch class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def snake_case ( self ): return AutoConfig.from_pretrained(UpperCamelCase__ ) def snake_case ( self ): __lowerCAmelCase = create_student_by_copying_alternating_layers(UpperCamelCase__ , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.num_hidden_layers , 1 ) def snake_case ( self ): __lowerCAmelCase = create_student_by_copying_alternating_layers(UpperCamelCase__ , tempfile.mkdtemp() , e=1 , d=UpperCamelCase__ ) def snake_case ( self ): __lowerCAmelCase = create_student_by_copying_alternating_layers(UpperCamelCase__ , tempfile.mkdtemp() , e=1 , d=UpperCamelCase__ ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers ) def snake_case ( self ): __lowerCAmelCase = create_student_by_copying_alternating_layers(UpperCamelCase__ , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , 1 ) def snake_case ( self ): with self.assertRaises(UpperCamelCase__ ): create_student_by_copying_alternating_layers(UpperCamelCase__ , tempfile.mkdtemp() , e=UpperCamelCase__ , d=UpperCamelCase__ )
636
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __A : Any = { "configuration_efficientformer": [ "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "EfficientFormerConfig", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Union[str, Any] = ["EfficientFormerImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Optional[int] = [ "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "EfficientFormerForImageClassification", "EfficientFormerForImageClassificationWithTeacher", "EfficientFormerModel", "EfficientFormerPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Optional[int] = [ "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFEfficientFormerForImageClassification", "TFEfficientFormerForImageClassificationWithTeacher", "TFEfficientFormerModel", "TFEfficientFormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientformer import EfficientFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientformer import ( EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientFormerForImageClassification, EfficientFormerForImageClassificationWithTeacher, EfficientFormerModel, EfficientFormerPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass 
else: from .modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, TFEfficientFormerPreTrainedModel, ) else: import sys __A : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
656
0
"""simple docstring""" from __future__ import annotations from typing import Any class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Dict ,A_ : int ,A_ : int ,A_ : float = 0 ) -> Any: A = row, column A = [[default_value for c in range(UpperCamelCase__ )] for r in range(UpperCamelCase__ )] def __str__( self : List[Any] ) -> Optional[Any]: A = F'Matrix consist of {self.row} rows and {self.column} columns\n' # Make string identifier A = 0 for row_vector in self.array: for obj in row_vector: A = max(UpperCamelCase__ ,len(str(UpperCamelCase__ ) ) ) A = F'%{max_element_length}s' # Make string and return def single_line(A_ : list[float] ) -> str: nonlocal string_format_identifier A = "[" line += ", ".join(string_format_identifier % (obj,) for obj in row_vector ) line += "]" return line s += "\n".join(single_line(UpperCamelCase__ ) for row_vector in self.array ) return s def __repr__( self : Dict ) -> Optional[int]: return str(self ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : tuple[int, int] ) -> Union[str, Any]: if not (isinstance(UpperCamelCase__ ,(list, tuple) ) and len(UpperCamelCase__ ) == 2): return False elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column): return False else: return True def __getitem__( self : Optional[int] ,A_ : tuple[int, int] ) -> Optional[int]: assert self.validate_indicies(UpperCamelCase__ ) return self.array[loc[0]][loc[1]] def __setitem__( self : str ,A_ : tuple[int, int] ,A_ : float ) -> Tuple: assert self.validate_indicies(UpperCamelCase__ ) A = value def __add__( self : str ,A_ : Matrix ) -> str: assert isinstance(UpperCamelCase__ ,UpperCamelCase__ ) assert self.row == another.row and self.column == another.column # Add A = Matrix(self.row ,self.column ) for r in range(self.row ): for c in range(self.column ): A = self[r, c] + another[r, c] return result def __neg__( self : Optional[int] ) -> Union[str, Any]: A = Matrix(self.row ,self.column ) for r in range(self.row ): for c in range(self.column 
): A = -self[r, c] return result def __sub__( self : Optional[int] ,A_ : Matrix ) -> Union[str, Any]: return self + (-another) def __mul__( self : Optional[int] ,A_ : int | float | Matrix ) -> Optional[int]: if isinstance(UpperCamelCase__ ,(int, float) ): # Scalar multiplication A = Matrix(self.row ,self.column ) for r in range(self.row ): for c in range(self.column ): A = self[r, c] * another return result elif isinstance(UpperCamelCase__ ,UpperCamelCase__ ): # Matrix multiplication assert self.column == another.row A = Matrix(self.row ,another.column ) for r in range(self.row ): for c in range(another.column ): for i in range(self.column ): result[r, c] += self[r, i] * another[i, c] return result else: A = F'Unsupported type given for another ({type(UpperCamelCase__ )})' raise TypeError(UpperCamelCase__ ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> str: A = Matrix(self.column ,self.row ) for r in range(self.row ): for c in range(self.column ): A = self[r, c] return result def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Matrix ,A_ : Matrix ) -> Tuple: assert isinstance(UpperCamelCase__ ,UpperCamelCase__ ) and isinstance(UpperCamelCase__ ,UpperCamelCase__ ) assert self.row == self.column == u.row == v.row # u, v should be column vector assert u.column == v.column == 1 # u, v should be column vector # Calculate A = v.transpose() A = (v_t * self * u)[0, 0] + 1 if numerator_factor == 0: return None # It's not invertable return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor)) # Testing if __name__ == "__main__": def _snake_case ( ): A = Matrix(3 , 3 , 0 ) for i in range(3 ): A = 1 print(F'a^(-1) is {ainv}' ) # u, v A = Matrix(3 , 1 , 0 ) A = 1, 2, -3 A = Matrix(3 , 1 , 0 ) A = 4, -2, 5 print(F'u is {u}' ) print(F'v is {v}' ) print(F'uv^T is {u * v.transpose()}' ) # Sherman Morrison print(F'(a + uv^T)^(-1) is {ainv.sherman_morrison(snake_case__ , snake_case__ )}' ) def _snake_case ( ): import doctest doctest.testmod() testa()
91
"""simple docstring""" import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def lowercase ( UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any]=10 ): """simple docstring""" A__ : Tuple =[] for _ in range(UpperCamelCase ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def lowercase ( UpperCamelCase : List[str] , UpperCamelCase : Union[str, Any]=10 ): """simple docstring""" A__ : Dict =[] for step in range(UpperCamelCase ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: A__ : List[Any] =os.path.join(UpperCamelCase , "schedule.bin" ) torch.save(scheduler.state_dict() , UpperCamelCase ) A__ : Dict =torch.load(UpperCamelCase ) scheduler.load_state_dict(UpperCamelCase ) return lrs @require_torch class __lowerCAmelCase ( unittest.TestCase): '''simple docstring''' def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int ): self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) ) for a, b in zip(UpperCamelCase__ , UpperCamelCase__ ): self.assertAlmostEqual(UpperCamelCase__ , UpperCamelCase__ , delta=UpperCamelCase__ ) def _UpperCAmelCase ( self : Tuple ): A__ : Any =torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase__ ) A__ : Optional[Any] =torch.tensor([0.4, 0.2, -0.5] ) A__ : Any =nn.MSELoss() # No warmup, constant schedule, no gradient clipping A__ : List[str] =AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 ) for _ in range(100 ): A__ : 
Optional[int] =criterion(UpperCamelCase__ , UpperCamelCase__ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) def _UpperCAmelCase ( self : Dict ): A__ : Optional[int] =torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase__ ) A__ : Dict =torch.tensor([0.4, 0.2, -0.5] ) A__ : Optional[int] =nn.MSELoss() # No warmup, constant schedule, no gradient clipping A__ : int =Adafactor( params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=UpperCamelCase__ , weight_decay=0.0 , relative_step=UpperCamelCase__ , scale_parameter=UpperCamelCase__ , warmup_init=UpperCamelCase__ , ) for _ in range(1000 ): A__ : List[Any] =criterion(UpperCamelCase__ , UpperCamelCase__ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) @require_torch class __lowerCAmelCase ( unittest.TestCase): '''simple docstring''' __magic_name__ : Optional[int] = nn.Linear(50 , 50) if is_torch_available() else None __magic_name__ : Any = AdamW(m.parameters() , lr=10.0) if is_torch_available() else None __magic_name__ : Union[str, Any] = 10 def _UpperCAmelCase ( self : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int=None ): self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) ) for a, b in zip(UpperCamelCase__ , UpperCamelCase__ ): self.assertAlmostEqual(UpperCamelCase__ , UpperCamelCase__ , delta=UpperCamelCase__ , msg=UpperCamelCase__ ) def _UpperCAmelCase ( self : Optional[Any] ): A__ : Union[str, Any] ={"num_warmup_steps": 2, "num_training_steps": 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) A__ : Union[str, Any] ={ 
get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {"num_warmup_steps": 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, "num_cycles": 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, "power": 2.0, "lr_end": 1E-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {"num_warmup_steps": 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): A__ , A__ : Any =data A__ : Union[str, Any] =scheduler_func(self.optimizer , **UpperCamelCase__ ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) A__ : int =unwrap_schedule(UpperCamelCase__ , self.num_steps ) self.assertListAlmostEqual( UpperCamelCase__ , UpperCamelCase__ , tol=1E-2 , msg=F'''failed for {scheduler_func} in normal scheduler''' , ) A__ : List[str] =scheduler_func(self.optimizer , **UpperCamelCase__ ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(UpperCamelCase__ ) # wrap to test picklability of the schedule A__ : Tuple =unwrap_and_save_reload_schedule(UpperCamelCase__ , self.num_steps ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ , msg=F'''failed for {scheduler_func} in save and reload''' ) class __lowerCAmelCase : '''simple docstring''' def __init__( self : int , UpperCamelCase__ : str ): A__ : int =fn def __call__( self : List[Any] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : List[Any] ): return self.fn(*UpperCamelCase__ , **UpperCamelCase__ ) @classmethod def _UpperCAmelCase ( self : Dict , 
UpperCamelCase__ : Dict ): A__ : str =list(map(self , scheduler.lr_lambdas ) )
656
0
"""simple docstring""" def _UpperCamelCase ( A , A ): UpperCamelCase_ =len(A ) UpperCamelCase_ =[] for i in range(len(A ) - pat_len + 1 ): UpperCamelCase_ =True for j in range(A ): if s[i + j] != pattern[j]: UpperCamelCase_ =False break if match_found: position.append(A ) return position if __name__ == "__main__": assert naive_pattern_search("ABCDEFG", "DE") == [3] print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
391
"""simple docstring""" import argparse import torch from transformers import ( SpeechTaConfig, SpeechTaFeatureExtractor, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaProcessor, SpeechTaTokenizer, logging, ) from transformers.tokenization_utils import AddedToken logging.set_verbosity_info() __A : List[Any] = logging.get_logger("transformers.models.speecht5") __A : Optional[Any] = { "speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm", "speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection", "speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv", "speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed", } __A : Optional[int] = { "text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens", "text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha", } __A : List[str] = { "speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0", "speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1", "speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer", "speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha", "speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer", } __A : List[Any] = { "speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out", "speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out", "speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv", "speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm", "speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv", "speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm", 
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv", "speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm", "speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv", "speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm", "speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv", "speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm", } __A : Union[str, Any] = { "text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens", } __A : Any = { "text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head", } __A : Union[str, Any] = { "encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj", "encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj", "encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj", "encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj", "encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm", "encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense", "encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense", "encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm", "encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm", "encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k", } __A : Optional[int] = { "decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj", "decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj", "decoder.layers.*.self_attn.q_proj": 
"speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj", "decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj", "decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm", "decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj", "decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj", "decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj", "decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj", "decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm", "decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense", "decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense", "decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm", } __A : Union[str, Any] = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_TEXT_DECODER_PRENET, **MAPPING_TEXT_DECODER_POSTNET, } __A : Optional[Any] = { **MAPPING_TEXT_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } __A : Optional[int] = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } __A : int = [] __A : int = [ "encoder.version", "encoder.layers.*.norm_k.weight", "encoder.layers.*.norm_k.bias", "decoder.version", "decoder.layers.*.norm_k.weight", "decoder.layers.*.norm_k.bias", "decoder.pos_emb.pe_k", "speech_encoder_prenet.embed_positions._float_tensor", "text_decoder_prenet.embed_positions._float_tensor", ] __A : Optional[Any] = IGNORE_KEYS + [ "encoder.proj", 
"text_encoder_prenet.*", "speech_decoder_prenet.*", "speech_decoder_postnet.*", ] __A : Tuple = IGNORE_KEYS + [ "encoder.proj", "speech_encoder_prenet.*", "text_decoder_prenet.*", "text_decoder_postnet.*", ] __A : Union[str, Any] = IGNORE_KEYS + [ "encoder.proj", "text_encoder_prenet.*", "text_decoder_prenet.*", "text_decoder_postnet.*", ] def lowercase ( UpperCamelCase : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : List[Any] , UpperCamelCase : int ): """simple docstring""" for attribute in key.split("." ): A__ : Dict =getattr(UpperCamelCase , UpperCamelCase ) if weight_type is not None: A__ : Union[str, Any] =getattr(UpperCamelCase , UpperCamelCase ).shape else: A__ : Tuple =hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": A__ : Any =value elif weight_type == "weight_g": A__ : Any =value elif weight_type == "weight_v": A__ : Any =value elif weight_type == "bias": A__ : Tuple =value elif weight_type == "running_mean": A__ : Dict =value elif weight_type == "running_var": A__ : List[str] =value elif weight_type == "num_batches_tracked": A__ : Dict =value else: A__ : Optional[int] =value logger.info(F'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''' ) def lowercase ( UpperCamelCase : Tuple , UpperCamelCase : Tuple ): """simple docstring""" for key in ignore_keys: if key.endswith(".*" ): if name.startswith(key[:-1] ): return True elif ".*." in key: A__ , A__ : List[str] =key.split(".*." 
) if prefix in name and suffix in name: return True elif key in name: return True return False def lowercase ( UpperCamelCase : Dict , UpperCamelCase : Optional[int] , UpperCamelCase : Dict ): """simple docstring""" A__ : Tuple =[] if task == "s2t": A__ : Dict =hf_model.speechta.encoder.prenet.feature_encoder A__ : int =MAPPING_S2T A__ : List[Any] =IGNORE_KEYS_S2T elif task == "t2s": A__ : Union[str, Any] =None A__ : List[Any] =MAPPING_T2S A__ : Tuple =IGNORE_KEYS_T2S elif task == "s2s": A__ : Optional[Any] =hf_model.speechta.encoder.prenet.feature_encoder A__ : Tuple =MAPPING_S2S A__ : Any =IGNORE_KEYS_S2S else: raise ValueError(F'''Unsupported task: {task}''' ) for name, value in fairseq_dict.items(): if should_ignore(UpperCamelCase , UpperCamelCase ): logger.info(F'''{name} was ignored''' ) continue A__ : Optional[Any] =False if "conv_layers" in name: load_conv_layer( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , hf_model.config.feat_extract_norm == "group" , ) A__ : List[Any] =True else: for key, mapped_key in MAPPING.items(): # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if "*" in key: A__ , A__ : Dict =key.split(".*." ) if prefix in name and suffix in name: A__ : int =suffix # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: if key in name: A__ : List[Any] =True if "*" in mapped_key: A__ : Optional[int] =name.split(UpperCamelCase )[0].split("." 
)[-2] A__ : int =mapped_key.replace("*" , UpperCamelCase ) if "weight_g" in name: A__ : str ="weight_g" elif "weight_v" in name: A__ : Optional[Any] ="weight_v" elif "bias" in name: A__ : Any ="bias" elif "weight" in name: A__ : Optional[int] ="weight" elif "running_mean" in name: A__ : Tuple ="running_mean" elif "running_var" in name: A__ : Optional[int] ="running_var" elif "num_batches_tracked" in name: A__ : str ="num_batches_tracked" else: A__ : List[Any] =None set_recursively(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) continue if not is_used: unused_weights.append(UpperCamelCase ) logger.warning(F'''Unused weights: {unused_weights}''' ) def lowercase ( UpperCamelCase : Dict , UpperCamelCase : Dict , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : Dict ): """simple docstring""" A__ : Any =full_name.split("conv_layers." )[-1] A__ : Dict =name.split("." ) A__ : int =int(items[0] ) A__ : str =int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) A__ : Optional[Any] =value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) A__ : Optional[int] =value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, 
but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) A__ : Any =value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) A__ : Any =value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(UpperCamelCase ) @torch.no_grad() def lowercase ( UpperCamelCase : Any , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[str] , UpperCamelCase : str=None , UpperCamelCase : Any=None , UpperCamelCase : Tuple=None , ): """simple docstring""" if config_path is not None: A__ : Any =SpeechTaConfig.from_pretrained(UpperCamelCase ) else: A__ : Any =SpeechTaConfig() if task == "s2t": A__ : Union[str, Any] =config.max_text_positions A__ : Dict =SpeechTaForSpeechToText(UpperCamelCase ) elif task == "t2s": A__ : str =1876 A__ : Optional[int] =600 A__ : Tuple =config.max_speech_positions A__ : Optional[Any] =SpeechTaForTextToSpeech(UpperCamelCase ) elif task == "s2s": A__ : str =1876 A__ : Tuple =config.max_speech_positions A__ : Any =SpeechTaForSpeechToSpeech(UpperCamelCase ) else: raise ValueError(F'''Unknown task name: {task}''' ) if vocab_path: A__ : str =SpeechTaTokenizer(UpperCamelCase , model_max_length=config.max_text_positions ) # Mask token behaves like a normal word, i.e. 
include the space before it A__ : Optional[Any] =AddedToken("<mask>" , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) A__ : int =mask_token tokenizer.add_special_tokens({"mask_token": mask_token} ) tokenizer.add_tokens(["<ctc_blank>"] ) A__ : Dict =SpeechTaFeatureExtractor() A__ : Tuple =SpeechTaProcessor(tokenizer=UpperCamelCase , feature_extractor=UpperCamelCase ) processor.save_pretrained(UpperCamelCase ) A__ : Union[str, Any] =torch.load(UpperCamelCase ) recursively_load_weights(fairseq_checkpoint["model"] , UpperCamelCase , UpperCamelCase ) model.save_pretrained(UpperCamelCase ) if repo_id: print("Pushing to the hub..." ) processor.push_to_hub(UpperCamelCase ) model.push_to_hub(UpperCamelCase ) if __name__ == "__main__": __A : Dict = argparse.ArgumentParser() parser.add_argument( "--task", default="s2t", type=str, help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.", ) parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model." ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) __A : str = parser.parse_args() convert_speechta_checkpoint( args.task, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.vocab_path, args.push_to_hub, )
656
0
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


# NOTE(review): the dump bound both module globals to the same mangled name,
# so the second assignment clobbered the logger; restored distinct names.
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-small-librispeech-asr": (
        "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}


class __lowerCAmelCase(PretrainedConfig):
    """Configuration for a Speech2Text encoder-decoder model.

    Stores the hyper-parameters of the transformer encoder/decoder plus the
    convolutional subsampler that precedes the encoder.  Defaults reproduce
    the values found in this file (facebook/s2t-small-librispeech-asr style).

    Fix notes: the original dump gave every ``__init__`` parameter the same
    mangled name (a SyntaxError) and collapsed every ``self.<attr>``
    assignment to a throwaway local, so the config stored nothing; parameter
    and attribute names are restored from the canonical signature that the
    default values match.  The base class reference (undefined in the dump)
    is restored to the imported ``PretrainedConfig``.
    """

    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10_000,
        encoder_layers=12,
        encoder_ffn_dim=2048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6000,
        max_target_positions=1024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        # `num_hidden_layers` mirrors the encoder depth for PretrainedConfig consumers.
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        # One kernel size must be supplied per convolutional subsampler layer.
        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
452
"""simple docstring""" from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase): '''simple docstring''' __magic_name__ : List[Any] = [R"""h\.\d+\.attn\.bias""", R"""h\.\d+\.attn\.masked_bias"""] @register_to_config def __init__( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : int = 50257 , UpperCamelCase__ : int = 1024 , UpperCamelCase__ : int = 768 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : str = "gelu_new" , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : float = 1E-5 , UpperCamelCase__ : float = 0.02 , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , ): super().__init__() A__ : Dict =prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( F'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and''' F''' `n_embd`: {n_embd} are not equal.''' ) A__ : Optional[int] =prefix_inner_dim A__ : Optional[int] =prefix_hidden_dim A__ : Optional[int] =( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim ) if self.prefix_hidden_dim is not None else nn.Identity() ) A__ : Optional[int] =( nn.Linear(self.prefix_hidden_dim , UpperCamelCase__ ) if self.prefix_hidden_dim is not None else nn.Identity() ) A__ : str =GPTaConfig( vocab_size=UpperCamelCase__ , n_positions=UpperCamelCase__ , n_embd=UpperCamelCase__ , n_layer=UpperCamelCase__ , n_head=UpperCamelCase__ , n_inner=UpperCamelCase__ 
, activation_function=UpperCamelCase__ , resid_pdrop=UpperCamelCase__ , embd_pdrop=UpperCamelCase__ , attn_pdrop=UpperCamelCase__ , layer_norm_epsilon=UpperCamelCase__ , initializer_range=UpperCamelCase__ , scale_attn_weights=UpperCamelCase__ , use_cache=UpperCamelCase__ , scale_attn_by_inverse_layer_idx=UpperCamelCase__ , reorder_and_upcast_attn=UpperCamelCase__ , ) A__ : Any =GPTaLMHeadModel(UpperCamelCase__ ) def _UpperCAmelCase ( self : Any , UpperCamelCase__ : torch.Tensor , UpperCamelCase__ : torch.Tensor , UpperCamelCase__ : Optional[torch.Tensor] = None , UpperCamelCase__ : Optional[torch.Tensor] = None , ): A__ : int =self.transformer.transformer.wte(UpperCamelCase__ ) A__ : Tuple =self.encode_prefix(UpperCamelCase__ ) A__ : Union[str, Any] =self.decode_prefix(UpperCamelCase__ ) A__ : Tuple =torch.cat((prefix_embeds, embedding_text) , dim=1 ) if labels is not None: A__ : Any =self.get_dummy_token(input_ids.shape[0] , input_ids.device ) A__ : List[Any] =torch.cat((dummy_token, input_ids) , dim=1 ) A__ : Any =self.transformer(inputs_embeds=UpperCamelCase__ , labels=UpperCamelCase__ , attention_mask=UpperCamelCase__ ) if self.prefix_hidden_dim is not None: return out, hidden else: return out def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : torch.device ): return torch.zeros(UpperCamelCase__ , self.prefix_length , dtype=torch.intaa , device=UpperCamelCase__ ) def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : Tuple ): return self.encode_prefix(UpperCamelCase__ ) @torch.no_grad() def _UpperCAmelCase ( self : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : str ): A__ : Optional[int] =torch.split(UpperCamelCase__ , 1 , dim=0 ) A__ : List[str] =[] A__ : Dict =[] for feature in features: A__ : Any =self.decode_prefix(feature.to(UpperCamelCase__ ) ) # back to the clip feature # Only support beam search for now A__ , A__ : Optional[Any] =self.generate_beam( 
input_embeds=UpperCamelCase__ , device=UpperCamelCase__ , eos_token_id=UpperCamelCase__ ) generated_tokens.append(output_tokens[0] ) generated_seq_lengths.append(seq_lengths[0] ) A__ : Optional[Any] =torch.stack(UpperCamelCase__ ) A__ : Optional[int] =torch.stack(UpperCamelCase__ ) return generated_tokens, generated_seq_lengths @torch.no_grad() def _UpperCAmelCase ( self : List[Any] , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : int = 5 , UpperCamelCase__ : int = 67 , UpperCamelCase__ : float = 1.0 , UpperCamelCase__ : Optional[int] = None , ): A__ : str =eos_token_id A__ : Optional[Any] =None A__ : int =None A__ : Union[str, Any] =torch.ones(UpperCamelCase__ , device=UpperCamelCase__ , dtype=torch.int ) A__ : Any =torch.zeros(UpperCamelCase__ , device=UpperCamelCase__ , dtype=torch.bool ) if input_embeds is not None: A__ : Union[str, Any] =input_embeds else: A__ : Optional[Any] =self.transformer.transformer.wte(UpperCamelCase__ ) for i in range(UpperCamelCase__ ): A__ : Optional[int] =self.transformer(inputs_embeds=UpperCamelCase__ ) A__ : Tuple =outputs.logits A__ : Union[str, Any] =logits[:, -1, :] / (temperature if temperature > 0 else 1.0) A__ : Optional[Any] =logits.softmax(-1 ).log() if scores is None: A__ , A__ : Union[str, Any] =logits.topk(UpperCamelCase__ , -1 ) A__ : Union[str, Any] =generated.expand(UpperCamelCase__ , *generated.shape[1:] ) A__ , A__ : Optional[int] =next_tokens.permute(1 , 0 ), scores.squeeze(0 ) if tokens is None: A__ : str =next_tokens else: A__ : Optional[Any] =tokens.expand(UpperCamelCase__ , *tokens.shape[1:] ) A__ : str =torch.cat((tokens, next_tokens) , dim=1 ) else: A__ : Union[str, Any] =-float(np.inf ) A__ : Dict =0 A__ : Optional[Any] =scores[:, None] + logits seq_lengths[~is_stopped] += 1 A__ : Optional[Any] =scores_sum / seq_lengths[:, None] A__ , A__ : List[Any] =scores_sum_average.view(-1 ).topk(UpperCamelCase__ , -1 ) A__ : Tuple 
=next_tokens // scores_sum.shape[1] A__ : List[Any] =seq_lengths[next_tokens_source] A__ : int =next_tokens % scores_sum.shape[1] A__ : str =next_tokens.unsqueeze(1 ) A__ : List[Any] =tokens[next_tokens_source] A__ : int =torch.cat((tokens, next_tokens) , dim=1 ) A__ : List[str] =generated[next_tokens_source] A__ : Optional[Any] =scores_sum_average * seq_lengths A__ : Optional[int] =is_stopped[next_tokens_source] A__ : List[str] =self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 ) A__ : str =torch.cat((generated, next_token_embed) , dim=1 ) A__ : str =is_stopped + next_tokens.eq(UpperCamelCase__ ).squeeze() if is_stopped.all(): break A__ : Optional[int] =scores / seq_lengths A__ : List[Any] =scores.argsort(descending=UpperCamelCase__ ) # tokens tensors are already padded to max_seq_length A__ : int =[tokens[i] for i in order] A__ : Any =torch.stack(UpperCamelCase__ , dim=0 ) A__ : int =torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype ) return output_texts, seq_lengths
656
0
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __magic_name__ : Optional[int] = logging.get_logger(__name__) __magic_name__ : int = { "microsoft/unispeech-large-1500h-cv": ( "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json" ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech } class __snake_case (_UpperCamelCase ): __a = """unispeech""" def __init__( self: Any , A_: int=32 , A_: Dict=7_68 , A_: Optional[Any]=12 , A_: Union[str, Any]=12 , A_: Tuple=30_72 , A_: Tuple="gelu" , A_: List[str]=0.1 , A_: str=0.1 , A_: str=0.1 , A_: Optional[int]=0.0 , A_: List[Any]=0.0 , A_: int=0.1 , A_: int=0.1 , A_: Any=0.02 , A_: List[str]=1E-5 , A_: str="group" , A_: Union[str, Any]="gelu" , A_: Tuple=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , A_: Optional[int]=(5, 2, 2, 2, 2, 2, 2) , A_: Optional[Any]=(10, 3, 3, 3, 3, 2, 2) , A_: Dict=False , A_: int=1_28 , A_: Optional[Any]=16 , A_: Optional[int]=False , A_: Union[str, Any]=True , A_: Dict=0.05 , A_: str=10 , A_: Dict=2 , A_: Dict=0.0 , A_: Optional[Any]=10 , A_: Union[str, Any]=0 , A_: Tuple=3_20 , A_: Union[str, Any]=2 , A_: Optional[int]=0.1 , A_: Tuple=1_00 , A_: str=2_56 , A_: List[str]=2_56 , A_: str=0.1 , A_: int="mean" , A_: str=False , A_: Union[str, Any]=False , A_: Any=2_56 , A_: List[Any]=80 , A_: Optional[int]=0 , A_: Any=1 , A_: Optional[int]=2 , A_: Optional[Any]=0.5 , **A_: int , ): super().__init__(**UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ ) __lowerCamelCase = hidden_size __lowerCamelCase = feat_extract_norm __lowerCamelCase = feat_extract_activation __lowerCamelCase = list(UpperCamelCase__ ) __lowerCamelCase = list(UpperCamelCase__ ) __lowerCamelCase = list(UpperCamelCase__ ) __lowerCamelCase = conv_bias __lowerCamelCase = num_conv_pos_embeddings __lowerCamelCase = 
num_conv_pos_embedding_groups __lowerCamelCase = len(self.conv_dim ) __lowerCamelCase = num_hidden_layers __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_dropout __lowerCamelCase = attention_dropout __lowerCamelCase = activation_dropout __lowerCamelCase = feat_proj_dropout __lowerCamelCase = final_dropout __lowerCamelCase = layerdrop __lowerCamelCase = layer_norm_eps __lowerCamelCase = initializer_range __lowerCamelCase = num_ctc_classes __lowerCamelCase = vocab_size __lowerCamelCase = do_stable_layer_norm __lowerCamelCase = use_weighted_layer_sum __lowerCamelCase = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==""" """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =""" f' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,' f' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' 
) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __lowerCamelCase = apply_spec_augment __lowerCamelCase = mask_time_prob __lowerCamelCase = mask_time_length __lowerCamelCase = mask_time_min_masks __lowerCamelCase = mask_feature_prob __lowerCamelCase = mask_feature_length __lowerCamelCase = mask_feature_min_masks # parameters for pretraining with codevector quantized representations __lowerCamelCase = num_codevectors_per_group __lowerCamelCase = num_codevector_groups __lowerCamelCase = contrastive_logits_temperature __lowerCamelCase = feat_quantizer_dropout __lowerCamelCase = num_negatives __lowerCamelCase = codevector_dim __lowerCamelCase = proj_codevector_dim __lowerCamelCase = diversity_loss_weight # ctc loss __lowerCamelCase = ctc_loss_reduction __lowerCamelCase = ctc_zero_infinity # pretraining loss __lowerCamelCase = replace_prob @property def __a ( self: str ): return functools.reduce(operator.mul , self.conv_stride , 1 )
281
"""simple docstring""" import os def lowercase ( ): """simple docstring""" A__ : List[Any] =os.path.dirname(os.path.realpath(UpperCamelCase ) ) A__ : str =os.path.join(UpperCamelCase , "triangle.txt" ) with open(UpperCamelCase ) as f: A__ : Optional[int] =f.readlines() A__ : str =[] for line in triangle: A__ : Union[str, Any] =[] for number in line.strip().split(" " ): numbers_from_line.append(int(UpperCamelCase ) ) a.append(UpperCamelCase ) for i in range(1 , len(UpperCamelCase ) ): for j in range(len(a[i] ) ): A__ : Union[str, Any] =a[i - 1][j] if j != len(a[i - 1] ) else 0 A__ : Union[str, Any] =a[i - 1][j - 1] if j > 0 else 0 a[i][j] += max(UpperCamelCase , UpperCamelCase ) return max(a[-1] ) if __name__ == "__main__": print(solution())
656
0
import gc import random import unittest import numpy as np import torch from transformers import XLMRobertaTokenizer from diffusers import ( AltDiffusionImgaImgPipeline, AutoencoderKL, PNDMScheduler, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class UpperCAmelCase__ ( unittest.TestCase ): def A__ ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def A__ ( self ): _A : Optional[Any] = 1 _A : List[str] = 3 _A : Dict = (32, 32) _A : Any = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(UpperCamelCase__ ) return image @property def A__ ( self ): torch.manual_seed(0 ) _A : int = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') ,up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') ,cross_attention_dim=32 ,) return model @property def A__ ( self ): torch.manual_seed(0 ) _A : List[Any] = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=4 ,) return model @property def A__ ( self ): torch.manual_seed(0 ) _A : Dict = RobertaSeriesConfig( hidden_size=32 ,project_dim=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=5006 ,) return RobertaSeriesModelWithTransformation(UpperCamelCase__ ) @property def A__ ( self ): def extract(*A__ ,**A__ ): class 
UpperCAmelCase__ : def __init__( self ): _A : Optional[int] = torch.ones([0] ) def A__ ( self ,A__ ): self.pixel_values.to(UpperCamelCase__ ) return self return Out() return extract def A__ ( self ): _A : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator _A : List[str] = self.dummy_cond_unet _A : Dict = PNDMScheduler(skip_prk_steps=UpperCamelCase__ ) _A : Optional[int] = self.dummy_vae _A : Optional[int] = self.dummy_text_encoder _A : Union[str, Any] = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' ) _A : Any = 77 _A : List[Any] = self.dummy_image.to(UpperCamelCase__ ) _A : Optional[int] = init_image / 2 + 0.5 # make sure here that pndm scheduler skips prk _A : Any = AltDiffusionImgaImgPipeline( unet=UpperCamelCase__ ,scheduler=UpperCamelCase__ ,vae=UpperCamelCase__ ,text_encoder=UpperCamelCase__ ,tokenizer=UpperCamelCase__ ,safety_checker=UpperCamelCase__ ,feature_extractor=self.dummy_extractor ,) _A : Tuple = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor ,do_normalize=UpperCamelCase__ ) _A : List[Any] = alt_pipe.to(UpperCamelCase__ ) alt_pipe.set_progress_bar_config(disable=UpperCamelCase__ ) _A : Optional[Any] = "A painting of a squirrel eating a burger" _A : str = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 ) _A : List[Any] = alt_pipe( [prompt] ,generator=UpperCamelCase__ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type='''np''' ,image=UpperCamelCase__ ,) _A : int = output.images _A : List[Any] = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 ) _A : str = alt_pipe( [prompt] ,generator=UpperCamelCase__ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type='''np''' ,image=UpperCamelCase__ ,return_dict=UpperCamelCase__ ,)[0] _A : Dict = image[0, -3:, -3:, -1] _A : int = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) _A : int = np.array([0.44_27, 0.37_31, 0.42_49, 0.49_41, 0.45_46, 0.41_48, 0.41_93, 0.46_66, 0.44_99] ) assert 
np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3 @unittest.skipIf(torch_device != '''cuda''' ,'''This test requires a GPU''' ) def A__ ( self ): _A : int = self.dummy_cond_unet _A : Optional[Any] = PNDMScheduler(skip_prk_steps=UpperCamelCase__ ) _A : Optional[Any] = self.dummy_vae _A : Tuple = self.dummy_text_encoder _A : List[str] = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' ) _A : Optional[int] = 77 _A : int = self.dummy_image.to(UpperCamelCase__ ) # put models in fp16 _A : Optional[Any] = unet.half() _A : int = vae.half() _A : Optional[Any] = bert.half() # make sure here that pndm scheduler skips prk _A : Optional[Any] = AltDiffusionImgaImgPipeline( unet=UpperCamelCase__ ,scheduler=UpperCamelCase__ ,vae=UpperCamelCase__ ,text_encoder=UpperCamelCase__ ,tokenizer=UpperCamelCase__ ,safety_checker=UpperCamelCase__ ,feature_extractor=self.dummy_extractor ,) _A : Dict = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor ,do_normalize=UpperCamelCase__ ) _A : Optional[int] = alt_pipe.to(UpperCamelCase__ ) alt_pipe.set_progress_bar_config(disable=UpperCamelCase__ ) _A : Union[str, Any] = "A painting of a squirrel eating a burger" _A : Dict = torch.manual_seed(0 ) _A : int = alt_pipe( [prompt] ,generator=UpperCamelCase__ ,num_inference_steps=2 ,output_type='''np''' ,image=UpperCamelCase__ ,).images assert image.shape == (1, 32, 32, 3) @unittest.skipIf(torch_device != '''cuda''' ,'''This test requires a GPU''' ) def A__ ( self ): _A : int = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) # resize to resolution that is divisible by 8 but not 16 or 32 _A : Dict = init_image.resize((760, 504) ) _A : List[Any] = "BAAI/AltDiffusion" _A : List[str] = AltDiffusionImgaImgPipeline.from_pretrained( UpperCamelCase__ ,safety_checker=UpperCamelCase__ ,) 
pipe.to(UpperCamelCase__ ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) pipe.enable_attention_slicing() _A : str = "A fantasy landscape, trending on artstation" _A : Tuple = torch.manual_seed(0 ) _A : List[Any] = pipe( prompt=UpperCamelCase__ ,image=UpperCamelCase__ ,strength=0.75 ,guidance_scale=7.5 ,generator=UpperCamelCase__ ,output_type='''np''' ,) _A : Union[str, Any] = output.images[0] _A : List[str] = image[255:258, 383:386, -1] assert image.shape == (504, 760, 3) _A : Dict = np.array([0.93_58, 0.93_97, 0.95_99, 0.99_01, 1.00_00, 1.00_00, 0.98_82, 1.00_00, 1.00_00] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class UpperCAmelCase__ ( unittest.TestCase ): def A__ ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def A__ ( self ): _A : Tuple = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) _A : Union[str, Any] = init_image.resize((768, 512) ) _A : List[Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''' ) _A : Tuple = "BAAI/AltDiffusion" _A : Optional[int] = AltDiffusionImgaImgPipeline.from_pretrained( UpperCamelCase__ ,safety_checker=UpperCamelCase__ ,) pipe.to(UpperCamelCase__ ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) pipe.enable_attention_slicing() _A : Optional[int] = "A fantasy landscape, trending on artstation" _A : Dict = torch.manual_seed(0 ) _A : List[Any] = pipe( prompt=UpperCamelCase__ ,image=UpperCamelCase__ ,strength=0.75 ,guidance_scale=7.5 ,generator=UpperCamelCase__ ,output_type='''np''' ,) _A : Optional[int] = output.images[0] assert image.shape == (512, 768, 3) # img2img is flaky across GPUs even in fp32, so using MAE here assert np.abs(expected_image - image ).max() < 1E-2
206
"""simple docstring""" import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() __A : int = logging.get_logger(__name__) def lowercase ( UpperCamelCase : Any ): """simple docstring""" A__ : str =OrderedDict() for key, value in state_dict.items(): if key.startswith("module.encoder" ): A__ : Dict =key.replace("module.encoder" , "glpn.encoder" ) if key.startswith("module.decoder" ): A__ : Optional[int] =key.replace("module.decoder" , "decoder.stages" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 A__ : Tuple =key[key.find("patch_embed" ) + len("patch_embed" )] A__ : Optional[Any] =key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(UpperCamelCase )-1}''' ) if "norm" in key: A__ : Dict =key.replace("norm" , "layer_norm" ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 A__ : Any =key[key.find("glpn.encoder.layer_norm" ) + len("glpn.encoder.layer_norm" )] A__ : Tuple =key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(UpperCamelCase )-1}''' ) if "layer_norm1" in key: A__ : List[Any] =key.replace("layer_norm1" , "layer_norm_1" ) if "layer_norm2" in key: A__ : Optional[int] =key.replace("layer_norm2" , "layer_norm_2" ) if "block" in key: # replace for example block1 by block.0 A__ : int =key[key.find("block" ) + len("block" )] A__ : Optional[Any] =key.replace(F'''block{idx}''' , F'''block.{int(UpperCamelCase )-1}''' ) if "attn.q" in key: A__ : Optional[Any] =key.replace("attn.q" , "attention.self.query" ) if "attn.proj" in key: A__ : Union[str, Any] =key.replace("attn.proj" , "attention.output.dense" ) if "attn" in key: A__ : str =key.replace("attn" , "attention.self" ) if "fc1" in key: A__ : Dict =key.replace("fc1" , "dense1" ) if "fc2" in key: A__ : str 
=key.replace("fc2" , "dense2" ) if "linear_pred" in key: A__ : List[Any] =key.replace("linear_pred" , "classifier" ) if "linear_fuse" in key: A__ : List[str] =key.replace("linear_fuse.conv" , "linear_fuse" ) A__ : Any =key.replace("linear_fuse.bn" , "batch_norm" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 A__ : str =key[key.find("linear_c" ) + len("linear_c" )] A__ : Dict =key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(UpperCamelCase )-1}''' ) if "bot_conv" in key: A__ : Union[str, Any] =key.replace("bot_conv" , "0.convolution" ) if "skip_conv1" in key: A__ : List[Any] =key.replace("skip_conv1" , "1.convolution" ) if "skip_conv2" in key: A__ : int =key.replace("skip_conv2" , "2.convolution" ) if "fusion1" in key: A__ : Optional[Any] =key.replace("fusion1" , "1.fusion" ) if "fusion2" in key: A__ : Optional[Any] =key.replace("fusion2" , "2.fusion" ) if "fusion3" in key: A__ : int =key.replace("fusion3" , "3.fusion" ) if "fusion" in key and "conv" in key: A__ : List[str] =key.replace("conv" , "convolutional_layer" ) if key.startswith("module.last_layer_depth" ): A__ : Tuple =key.replace("module.last_layer_depth" , "head.head" ) A__ : int =value return new_state_dict def lowercase ( UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict ): """simple docstring""" # for each of the encoder blocks: for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) A__ : int =state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' ) A__ : str =state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' ) # next, add keys and values (in that order) to the state dict A__ : List[str] =kv_weight[ : config.hidden_sizes[i], : ] A__ : Dict =kv_bias[: config.hidden_sizes[i]] A__ : Any =kv_weight[ config.hidden_sizes[i] :, : ] A__ : Any =kv_bias[config.hidden_sizes[i] :] def lowercase ( ): """simple 
docstring""" A__ : Optional[Any] ="http://images.cocodataset.org/val2017/000000039769.jpg" A__ : List[Any] =Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw ) return image @torch.no_grad() def lowercase ( UpperCamelCase : str , UpperCamelCase : Tuple , UpperCamelCase : List[str]=False , UpperCamelCase : str=None ): """simple docstring""" A__ : List[str] =GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] ) # load image processor (only resize + rescale) A__ : str =GLPNImageProcessor() # prepare image A__ : Any =prepare_img() A__ : Optional[int] =image_processor(images=UpperCamelCase , return_tensors="pt" ).pixel_values logger.info("Converting model..." ) # load original state dict A__ : int =torch.load(UpperCamelCase , map_location=torch.device("cpu" ) ) # rename keys A__ : Union[str, Any] =rename_keys(UpperCamelCase ) # key and value matrices need special treatment read_in_k_v(UpperCamelCase , UpperCamelCase ) # create HuggingFace model and load state dict A__ : Optional[int] =GLPNForDepthEstimation(UpperCamelCase ) model.load_state_dict(UpperCamelCase ) model.eval() # forward pass A__ : int =model(UpperCamelCase ) A__ : Optional[Any] =outputs.predicted_depth # verify output if model_name is not None: if "nyu" in model_name: A__ : List[Any] =torch.tensor( [[4.41_47, 4.08_73, 4.06_73], [3.78_90, 3.28_81, 3.15_25], [3.76_74, 3.54_23, 3.49_13]] ) elif "kitti" in model_name: A__ : Tuple =torch.tensor( [[3.42_91, 2.78_65, 2.51_51], [3.28_41, 2.70_21, 2.35_02], [3.11_47, 2.46_25, 2.24_81]] ) else: raise ValueError(F'''Unknown model name: {model_name}''' ) A__ : str =torch.Size([1, 480, 640] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3] , UpperCamelCase , atol=1E-4 ) print("Looks ok!" ) # finally, push to hub if required if push_to_hub: logger.info("Pushing model and image processor to the hub..." 
) model.push_to_hub( repo_path_or_name=Path(UpperCamelCase , UpperCamelCase ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=UpperCamelCase , ) image_processor.push_to_hub( repo_path_or_name=Path(UpperCamelCase , UpperCamelCase ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=UpperCamelCase , ) if __name__ == "__main__": __A : List[str] = argparse.ArgumentParser() parser.add_argument( "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub." ) parser.add_argument( "--model_name", default="glpn-kitti", type=str, help="Name of the model in case you're pushing to the hub.", ) __A : Any = parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
656
0
"""simple docstring""" import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class __magic_name__ ( unittest.TestCase ): def __init__( self : Union[str, Any] , snake_case_ : str , snake_case_ : str=7 , snake_case_ : str=3 , snake_case_ : Dict=30 , snake_case_ : List[str]=400 , snake_case_ : List[str]=True , snake_case_ : Dict=None , snake_case_ : Optional[int]=True , snake_case_ : str=[0.5, 0.5, 0.5] , snake_case_ : Optional[Any]=[0.5, 0.5, 0.5] , snake_case_ : str=True , snake_case_ : Union[str, Any]=1 / 255 , snake_case_ : int=True , ): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p __snake_case = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333} __snake_case = parent __snake_case = batch_size __snake_case = num_channels __snake_case = min_resolution __snake_case = max_resolution __snake_case = do_resize __snake_case = size __snake_case = do_normalize __snake_case = image_mean __snake_case = image_std __snake_case = do_rescale __snake_case = rescale_factor __snake_case = do_pad def lowerCAmelCase ( self : Dict ): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def lowerCAmelCase ( self : Union[str, Any] , snake_case_ : Dict , snake_case_ : int=False ): if not batched: __snake_case = image_inputs[0] if isinstance(UpperCamelCase__ , Image.Image ): __snake_case = image.size else: __snake_case = image.shape[1], 
image.shape[2] if w < h: __snake_case = int(self.size["shortest_edge"] * h / w ) __snake_case = self.size["shortest_edge"] elif w > h: __snake_case = self.size["shortest_edge"] __snake_case = int(self.size["shortest_edge"] * w / h ) else: __snake_case = self.size["shortest_edge"] __snake_case = self.size["shortest_edge"] else: __snake_case = [] for image in image_inputs: __snake_case = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __snake_case = max(UpperCamelCase__ , key=lambda snake_case_ : item[0] )[0] __snake_case = max(UpperCamelCase__ , key=lambda snake_case_ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class __magic_name__ ( _UpperCamelCase , unittest.TestCase ): _SCREAMING_SNAKE_CASE : Optional[int] = DeformableDetrImageProcessor if is_vision_available() else None def lowerCAmelCase ( self : List[str] ): __snake_case = DeformableDetrImageProcessingTester(self ) @property def lowerCAmelCase ( self : Optional[Any] ): return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase ( self : List[str] ): __snake_case = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCamelCase__ , "image_mean" ) ) self.assertTrue(hasattr(UpperCamelCase__ , "image_std" ) ) self.assertTrue(hasattr(UpperCamelCase__ , "do_normalize" ) ) self.assertTrue(hasattr(UpperCamelCase__ , "do_resize" ) ) self.assertTrue(hasattr(UpperCamelCase__ , "do_rescale" ) ) self.assertTrue(hasattr(UpperCamelCase__ , "do_pad" ) ) self.assertTrue(hasattr(UpperCamelCase__ , "size" ) ) def lowerCAmelCase ( self : List[str] ): __snake_case = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} ) self.assertEqual(image_processor.do_pad , UpperCamelCase__ ) __snake_case = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , 
pad_and_return_pixel_mask=UpperCamelCase__ ) self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} ) self.assertEqual(image_processor.do_pad , UpperCamelCase__ ) def lowerCAmelCase ( self : Tuple ): pass def lowerCAmelCase ( self : Optional[Any] ): # Initialize image_processing __snake_case = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , Image.Image ) # Test not batched input __snake_case = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values __snake_case = self.image_processor_tester.get_expected_values(UpperCamelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __snake_case = self.image_processor_tester.get_expected_values(UpperCamelCase__ , batched=UpperCamelCase__ ) __snake_case = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCAmelCase ( self : List[Any] ): # Initialize image_processing __snake_case = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , np.ndarray ) # Test not batched input __snake_case = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values __snake_case = self.image_processor_tester.get_expected_values(UpperCamelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched 
__snake_case = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values __snake_case = self.image_processor_tester.get_expected_values(UpperCamelCase__ , batched=UpperCamelCase__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCAmelCase ( self : Tuple ): # Initialize image_processing __snake_case = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , torch.Tensor ) # Test not batched input __snake_case = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values __snake_case = self.image_processor_tester.get_expected_values(UpperCamelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __snake_case = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values __snake_case = self.image_processor_tester.get_expected_values(UpperCamelCase__ , batched=UpperCamelCase__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def lowerCAmelCase ( self : Optional[Any] ): # prepare image and target __snake_case = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f: __snake_case = json.loads(f.read() ) __snake_case = {"image_id": 39769, "annotations": target} # encode them __snake_case = DeformableDetrImageProcessor() __snake_case = image_processing(images=UpperCamelCase__ , annotations=UpperCamelCase__ , return_tensors="pt" ) # verify pixel values __snake_case = torch.Size([1, 3, 
800, 1066] ) self.assertEqual(encoding["pixel_values"].shape , UpperCamelCase__ ) __snake_case = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , UpperCamelCase__ , atol=1e-4 ) ) # verify area __snake_case = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , UpperCamelCase__ ) ) # verify boxes __snake_case = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , UpperCamelCase__ ) __snake_case = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , UpperCamelCase__ , atol=1e-3 ) ) # verify image_id __snake_case = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , UpperCamelCase__ ) ) # verify is_crowd __snake_case = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , UpperCamelCase__ ) ) # verify class_labels __snake_case = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , UpperCamelCase__ ) ) # verify orig_size __snake_case = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , UpperCamelCase__ ) ) # verify size __snake_case = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , UpperCamelCase__ ) ) @slow def lowerCAmelCase ( self : List[Any] ): # prepare image, target and masks_path __snake_case = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f: __snake_case = json.loads(f.read() ) __snake_case = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target} __snake_case = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" ) # encode them __snake_case = 
DeformableDetrImageProcessor(format="coco_panoptic" ) __snake_case = image_processing(images=UpperCamelCase__ , annotations=UpperCamelCase__ , masks_path=UpperCamelCase__ , return_tensors="pt" ) # verify pixel values __snake_case = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding["pixel_values"].shape , UpperCamelCase__ ) __snake_case = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , UpperCamelCase__ , atol=1e-4 ) ) # verify area __snake_case = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , UpperCamelCase__ ) ) # verify boxes __snake_case = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , UpperCamelCase__ ) __snake_case = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , UpperCamelCase__ , atol=1e-3 ) ) # verify image_id __snake_case = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , UpperCamelCase__ ) ) # verify is_crowd __snake_case = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , UpperCamelCase__ ) ) # verify class_labels __snake_case = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , UpperCamelCase__ ) ) # verify masks __snake_case = 822873 self.assertEqual(encoding["labels"][0]["masks"].sum().item() , UpperCamelCase__ ) # verify orig_size __snake_case = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , UpperCamelCase__ ) ) # verify size __snake_case = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , UpperCamelCase__ ) )
163
"""simple docstring""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging __A : Any = logging.get_logger(__name__) __A : Optional[Any] = { "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json", # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class __lowerCAmelCase ( _UpperCamelCase): '''simple docstring''' __magic_name__ : Union[str, Any] = """gpt_neo""" __magic_name__ : Union[str, Any] = ["""past_key_values"""] __magic_name__ : Dict = {"""num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""} def __init__( self : Dict , UpperCamelCase__ : List[Any]=50257 , UpperCamelCase__ : Optional[Any]=2048 , UpperCamelCase__ : Tuple=2048 , UpperCamelCase__ : int=24 , UpperCamelCase__ : Dict=[[["global", "local"], 12]] , UpperCamelCase__ : Optional[Any]=16 , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : str=256 , UpperCamelCase__ : List[str]="gelu_new" , UpperCamelCase__ : List[str]=0.0 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : str=0.0 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : List[str]=1E-5 , UpperCamelCase__ : Any=0.02 , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Optional[Any]=50256 , UpperCamelCase__ : List[str]=50256 , **UpperCamelCase__ : str , ): A__ : Optional[Any] =vocab_size A__ : Dict =max_position_embeddings A__ : List[str] =hidden_size A__ : List[Any] =num_layers A__ : Tuple =num_heads A__ : List[str] =intermediate_size A__ : Tuple =window_size A__ : Dict =activation_function A__ : str =resid_dropout A__ : Union[str, Any] =embed_dropout A__ : List[str] =attention_dropout A__ : Tuple =classifier_dropout A__ : int =layer_norm_epsilon A__ : int =initializer_range A__ : str =use_cache A__ : Tuple =bos_token_id A__ 
: int =eos_token_id A__ : int =attention_types A__ : Any =self.expand_attention_types_params(UpperCamelCase__ ) if len(self.attention_layers ) != self.num_layers: raise ValueError( "Configuration for convolutional module is incorrect. " "It is required that `len(config.attention_layers)` == `config.num_layers` " F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, ''' F'''`config.num_layers = {self.num_layers}`. ''' "`config.attention_layers` is prepared using `config.attention_types`. " "Please verify the value of `config.attention_types` argument." ) super().__init__(bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ ) @staticmethod def _UpperCAmelCase ( UpperCamelCase__ : List[str] ): A__ : Optional[Any] =[] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def lowercase ( UpperCamelCase : List[str] , UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] ): """simple docstring""" import torch A__ : List[str] =input.size() A__ : Dict =len(UpperCamelCase ) A__ : Optional[int] =shape[dimension] A__ : str =torch.arange(0 , UpperCamelCase , UpperCamelCase ) A__ : Optional[int] =torch.div(sizedim - size , UpperCamelCase , rounding_mode="floor" ) + 1 A__ : str =torch.arange(UpperCamelCase ) + low_indices[:min_length][:, None] A__ : Tuple =[slice(UpperCamelCase )] * rank A__ : int =indices A__ : Optional[int] =input[s] A__ : Union[str, Any] =list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(UpperCamelCase ) def lowercase ( UpperCamelCase : str , UpperCamelCase : Any ): """simple docstring""" import torch A__ : List[str] =torch.arange(1 , UpperCamelCase ) A__ : List[Any] =torch.remainder(UpperCamelCase , UpperCamelCase ) A__ : Optional[int] =remainders == 0 A__ : str =candidates[divisor_indices] A__ : int =torch.max(UpperCamelCase ) return largest_divisor, torch.div(UpperCamelCase , 
UpperCamelCase , rounding_mode="floor" ) class __lowerCAmelCase ( _UpperCamelCase): '''simple docstring''' @property def _UpperCAmelCase ( self : List[Any] ): A__ : Optional[int] =OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} ) if self.use_past: self.fill_with_past_key_values_(UpperCamelCase__ , direction="inputs" ) A__ : Optional[int] ={0: "batch", 1: "past_sequence + sequence"} else: A__ : Tuple ={0: "batch", 1: "sequence"} return common_inputs @property def _UpperCAmelCase ( self : List[str] ): return self._config.num_heads def _UpperCAmelCase ( self : int , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ): A__ : Union[str, Any] =super(UpperCamelCase__ , self ).generate_dummy_inputs( UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ ) # We need to order the input in the way they appears in the forward() A__ : List[Any] =OrderedDict({"input_ids": common_inputs["input_ids"]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch A__ , A__ : Union[str, Any] =common_inputs["input_ids"].shape # Not using the same length for past_key_values A__ : Union[str, Any] =seqlen + 2 A__ : List[Any] =( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) A__ : Optional[Any] =[ (torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) for _ in range(self.num_layers ) ] A__ : Optional[Any] =common_inputs["attention_mask"] if self.use_past: A__ : Any =ordered_inputs["attention_mask"].dtype A__ : Tuple =torch.cat( [ordered_inputs["attention_mask"], torch.ones(UpperCamelCase__ , UpperCamelCase__ , dtype=UpperCamelCase__ )] , dim=1 ) return ordered_inputs @property def _UpperCAmelCase ( self : List[str] ): return 13
656
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) SCREAMING_SNAKE_CASE__ : Optional[Any] ={ "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Any =[ "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST", "MegaForCausalLM", "MegaForMaskedLM", "MegaForMultipleChoice", "MegaForQuestionAnswering", "MegaForSequenceClassification", "MegaForTokenClassification", "MegaModel", "MegaPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mega import ( MEGA_PRETRAINED_MODEL_ARCHIVE_LIST, MegaForCausalLM, MegaForMaskedLM, MegaForMultipleChoice, MegaForQuestionAnswering, MegaForSequenceClassification, MegaForTokenClassification, MegaModel, MegaPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ : Optional[Any] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
434
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A : Union[str, Any] = logging.get_logger(__name__) __A : Any = { # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class __lowerCAmelCase ( _UpperCamelCase): '''simple docstring''' __magic_name__ : Tuple = """megatron-bert""" def __init__( self : Tuple , UpperCamelCase__ : Dict=29056 , UpperCamelCase__ : int=1024 , UpperCamelCase__ : Optional[int]=24 , UpperCamelCase__ : Dict=16 , UpperCamelCase__ : int=4096 , UpperCamelCase__ : str="gelu" , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : int=512 , UpperCamelCase__ : str=2 , UpperCamelCase__ : Union[str, Any]=0.02 , UpperCamelCase__ : Any=1E-12 , UpperCamelCase__ : List[Any]=0 , UpperCamelCase__ : str="absolute" , UpperCamelCase__ : Dict=True , **UpperCamelCase__ : Tuple , ): super().__init__(pad_token_id=UpperCamelCase__ , **UpperCamelCase__ ) A__ : Optional[int] =vocab_size A__ : Optional[int] =hidden_size A__ : str =num_hidden_layers A__ : Any =num_attention_heads A__ : str =hidden_act A__ : Optional[int] =intermediate_size A__ : str =hidden_dropout_prob A__ : str =attention_probs_dropout_prob A__ : List[Any] =max_position_embeddings A__ : List[Any] =type_vocab_size A__ : Tuple =initializer_range A__ : Any =layer_norm_eps A__ : Any =position_embedding_type A__ : Union[str, Any] =use_cache
656
0
import warnings from ...utils import logging from .image_processing_layoutlmva import LayoutLMvaImageProcessor __A : List[str] = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' def __init__( self : int , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : List[str] ): warnings.warn( "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use LayoutLMv2ImageProcessor instead." , UpperCamelCase__ , ) super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
16
"""simple docstring""" from __future__ import annotations def lowercase ( UpperCamelCase : list[float] ): """simple docstring""" if len(UpperCamelCase ) < 2: raise ValueError("Monogons and Digons are not polygons in the Euclidean space" ) if any(i <= 0 for i in nums ): raise ValueError("All values must be greater than 0" ) A__ : Union[str, Any] =nums.copy() copy_nums.sort() return copy_nums[-1] < sum(copy_nums[:-1] ) if __name__ == "__main__": import doctest doctest.testmod()
656
0
"""Flax scheduler base utilities for diffusers.

Fixes the mangled identifiers of the previous version: locals such as
``scheduler``, ``state``, ``betas`` and the module-level helper names were
placeholders (``_SCREAMING_SNAKE_CASE``, ``lowerCamelCase``) while the bodies
read the original names, which were never bound.
"""
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union

import flax
import jax.numpy as jnp

from ..utils import BaseOutput


SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDPMScheduler = 1
    FlaxDDIMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5


@dataclass
class FlaxSchedulerOutput(BaseOutput):
    """Base output for scheduler ``step`` functions: the denoised previous sample."""

    prev_sample: jnp.ndarray


class FlaxSchedulerMixin:
    """Mixin providing save/load and compatibility helpers for Flax schedulers."""

    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        """Instantiate a scheduler (and its state) from a saved configuration."""
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        """Write the scheduler configuration to ``save_directory``."""
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        """Scheduler classes this scheduler's config can be loaded into."""
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        # Resolve class names against the top-level package (e.g. `diffusers`).
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes


def broadcast_to_shape_from_left(x, shape):
    """Broadcast ``x`` to ``shape`` by appending trailing singleton dimensions."""
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32):
    """Create a beta schedule from the Glide cosine alpha-bar function.

    Each beta is ``1 - alpha_bar(t2)/alpha_bar(t1)``, clipped to ``max_beta``.
    """

    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)


@flax.struct.dataclass
class CommonSchedulerState:
    """Shared diffusion state: betas, alphas and their cumulative product."""

    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        """Build the state from a scheduler's config (beta schedule selection)."""
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas

        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )


def get_sqrt_alpha_prod(state: CommonSchedulerState, original_samples, noise, timesteps):
    """Return sqrt(alpha_bar_t) and sqrt(1 - alpha_bar_t) broadcast to sample shape."""
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(state: CommonSchedulerState, original_samples, noise, timesteps):
    """Forward-diffuse samples: x_t = sqrt(a_bar) * x_0 + sqrt(1 - a_bar) * noise."""
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample, noise, timesteps):
    """Compute the v-prediction target: sqrt(a_bar) * noise - sqrt(1 - a_bar) * sample."""
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
691
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) __A : Optional[Any] = { "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Any = [ "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST", "MegaForCausalLM", "MegaForMaskedLM", "MegaForMultipleChoice", "MegaForQuestionAnswering", "MegaForSequenceClassification", "MegaForTokenClassification", "MegaModel", "MegaPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mega import ( MEGA_PRETRAINED_MODEL_ARCHIVE_LIST, MegaForCausalLM, MegaForMaskedLM, MegaForMultipleChoice, MegaForQuestionAnswering, MegaForSequenceClassification, MegaForTokenClassification, MegaModel, MegaPreTrainedModel, ) else: import sys __A : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
656
0
_lowerCamelCase : Union[str, Any] = {str(digit): digit**5 for digit in range(10)} def a_ ( __lowercase : int ) -> Optional[int]: return sum(DIGITS_FIFTH_POWER[digit] for digit in str(__lowercase ) ) def a_ ( ) -> List[str]: return sum( number for number in range(1_000 , 1_000_000 ) if number == digits_fifth_powers_sum(__lowercase ) ) if __name__ == "__main__": print(solution())
686
"""simple docstring""" def lowercase ( UpperCamelCase : int ): """simple docstring""" if num <= 0: raise ValueError("Input must be a positive integer" ) A__ : Union[str, Any] =[True] * (num + 1) A__ : Union[str, Any] =2 while p * p <= num: if primes[p]: for i in range(p * p , num + 1 , UpperCamelCase ): A__ : str =False p += 1 return [prime for prime in range(2 , num + 1 ) if primes[prime]] if __name__ == "__main__": import doctest doctest.testmod() __A : Optional[int] = int(input("Enter a positive integer: ").strip()) print(prime_sieve_eratosthenes(user_num))
656
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available A : List[Any] = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Tuple = ["GLPNFeatureExtractor"] A : List[str] = ["GLPNImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Dict = [ "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST", "GLPNForDepthEstimation", "GLPNLayer", "GLPNModel", "GLPNPreTrainedModel", ] if TYPE_CHECKING: from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_glpn import GLPNFeatureExtractor from .image_processing_glpn import GLPNImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_glpn import ( GLPN_PRETRAINED_MODEL_ARCHIVE_LIST, GLPNForDepthEstimation, GLPNLayer, GLPNModel, GLPNPreTrainedModel, ) else: import sys A : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
636
"""simple docstring""" import pickle import unittest import torch from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils import require_cpu @require_cpu class __lowerCAmelCase ( unittest.TestCase): '''simple docstring''' def _UpperCAmelCase ( self : List[Any] ): A__ : Tuple =torch.nn.Linear(10 , 10 ) A__ : List[str] =torch.optim.SGD(model.parameters() , 0.1 ) A__ : Union[str, Any] =Accelerator() A__ : str =accelerator.prepare(UpperCamelCase__ ) try: pickle.loads(pickle.dumps(UpperCamelCase__ ) ) except Exception as e: self.fail(F'''Accelerated optimizer pickling failed with {e}''' ) AcceleratorState._reset_state()
656
0
"""simple docstring""" import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class lowerCAmelCase_ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: int = IFInpaintingSuperResolutionPipeline _lowerCamelCase: List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""} _lowerCamelCase: str = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} ) _lowerCamelCase: Optional[Any] = PipelineTesterMixin.required_optional_params - {"""latents"""} def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]: return self._get_superresolution_dummy_components() def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[Any] ,A_ : Optional[int]=0 ) -> str: if str(UpperCamelCase__ ).startswith('mps' ): A = torch.manual_seed(UpperCamelCase__ ) else: A = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ ) A = floats_tensor((1, 3, 16, 16) ,rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) A = floats_tensor((1, 3, 32, 32) ,rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) A = floats_tensor((1, 3, 32, 32) ,rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) A = { "prompt": "A painting of a squirrel eating a burger", "image": image, "original_image": original_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() ,reason='XFormers attention is only available with CUDA and `xformers` 
installed' ,) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) def _SCREAMING_SNAKE_CASE ( self : int ) -> str: self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda' ,reason='float16 requires CUDA' ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1e-1 ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple: self._test_save_load_local() def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]: self._test_inference_batch_single_identical( expected_max_diff=1e-2 ,)
91
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_big_bird import BigBirdTokenizer else: __A : Optional[int] = None __A : Union[str, Any] = logging.get_logger(__name__) __A : List[Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} __A : str = { "vocab_file": { "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model", "google/bigbird-roberta-large": ( "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model" ), "google/bigbird-base-trivia-itc": ( "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model" ), }, "tokenizer_file": { "google/bigbird-roberta-base": ( "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json" ), "google/bigbird-roberta-large": ( "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json" ), "google/bigbird-base-trivia-itc": ( "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json" ), }, } __A : List[str] = { "google/bigbird-roberta-base": 4_096, "google/bigbird-roberta-large": 4_096, "google/bigbird-base-trivia-itc": 4_096, } __A : Tuple = "▁" class __lowerCAmelCase ( _UpperCamelCase): '''simple docstring''' __magic_name__ : Dict = VOCAB_FILES_NAMES __magic_name__ : Any = PRETRAINED_VOCAB_FILES_MAP __magic_name__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ : List[Any] = BigBirdTokenizer __magic_name__ : Any = ["""input_ids""", """attention_mask"""] __magic_name__ : List[int] = [] def __init__( self : str , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Union[str, Any]="<unk>" , UpperCamelCase__ : 
str="<s>" , UpperCamelCase__ : int="</s>" , UpperCamelCase__ : Optional[int]="<pad>" , UpperCamelCase__ : Optional[Any]="[SEP]" , UpperCamelCase__ : List[Any]="[MASK]" , UpperCamelCase__ : str="[CLS]" , **UpperCamelCase__ : List[Any] , ): A__ : Optional[int] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else bos_token A__ : Optional[Any] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else eos_token A__ : Optional[int] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else unk_token A__ : int =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else pad_token A__ : str =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else cls_token A__ : List[Any] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it A__ : str =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token super().__init__( UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , **UpperCamelCase__ , ) A__ : List[Any] =vocab_file A__ : Optional[int] =False if not self.vocab_file else True def _UpperCAmelCase ( self : str , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ): A__ : Tuple =[self.sep_token_id] A__ : str =[self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." 
) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase__ )) + [1] return [1] + ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) + [1] def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ): A__ : Tuple =[self.sep_token_id] A__ : Dict =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _UpperCAmelCase ( self : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(UpperCamelCase__ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return A__ : List[str] =os.path.join( UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ): copyfile(self.vocab_file , UpperCamelCase__ ) return (out_vocab_file,)
656
0
"""simple docstring""" def _UpperCamelCase ( A , A ): UpperCamelCase_ =[0 for i in range(r + 1 )] # nc0 = 1 UpperCamelCase_ =1 for i in range(1 , n + 1 ): # to compute current row from previous row. UpperCamelCase_ =min(A , A ) while j > 0: c[j] += c[j - 1] j -= 1 return c[r] print(binomial_coefficient(n=10, r=5))
391
"""simple docstring""" import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging __A : Optional[int] = logging.get_logger(__name__) __A : Optional[int] = {"vocab_file": "spiece.model"} __A : List[Any] = { "vocab_file": { "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model", } } class __lowerCAmelCase ( _UpperCamelCase): '''simple docstring''' def __init__( self : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any]=False , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Dict="<s>" , UpperCamelCase__ : str="</s>" , UpperCamelCase__ : Union[str, Any]="<unk>" , UpperCamelCase__ : Optional[int]="<sep>" , UpperCamelCase__ : Optional[int]="<pad>" , UpperCamelCase__ : Optional[int]="<cls>" , UpperCamelCase__ : List[str]="<mask>" , UpperCamelCase__ : Optional[Any]=["<eop>", "<eod>"] , UpperCamelCase__ : Optional[Dict[str, Any]] = None , **UpperCamelCase__ : Dict , ): A__ : List[str] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token A__ : Tuple ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=UpperCamelCase__ , remove_space=UpperCamelCase__ , keep_accents=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , ) A__ : Dict =3 A__ : int =do_lower_case A__ : str =remove_space A__ : Optional[Any] =keep_accents A__ : int =vocab_file A__ : Dict 
=spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCamelCase__ ) try: import jieba except ModuleNotFoundError as error: raise error.__class__( "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. " "See https://pypi.org/project/jieba/ for installation." ) A__ : Union[str, Any] =jieba A__ : List[str] =str.maketrans(" \n" , "\u2582\u2583" ) @property # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size def _UpperCAmelCase ( self : Union[str, Any] ): return len(self.sp_model ) def _UpperCAmelCase ( self : Optional[int] ): A__ : Any ={self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : List[str] ): A__ : Union[str, Any] =self.__dict__.copy() A__ : Tuple =None return state def __setstate__( self : Tuple , UpperCamelCase__ : int ): A__ : Union[str, Any] =d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): A__ : Optional[int] ={} A__ : Union[str, Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : Dict ): if self.remove_space: A__ : Optional[int] =" ".join(inputs.strip().split() ) else: A__ : Optional[Any] =inputs A__ : Any =outputs.replace("``" , "\"" ).replace("''" , "\"" ) if not self.keep_accents: A__ : Optional[Any] =unicodedata.normalize("NFKD" , UpperCamelCase__ ) A__ : Tuple ="".join([c for c in outputs if not unicodedata.combining(UpperCamelCase__ )] ) if self.do_lower_case: A__ : str =outputs.lower() return outputs def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : str ): A__ : Optional[int] =self.preprocess_text(UpperCamelCase__ ) A__ : Dict =self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ ) A__ : List[str] =[] for piece in pieces: if len(UpperCamelCase__ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit(): A__ : str 
=self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCamelCase__ , "" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: A__ : Union[str, Any] =cur_pieces[1:] else: A__ : List[str] =cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(UpperCamelCase__ ) else: new_pieces.append(UpperCamelCase__ ) return new_pieces def _UpperCAmelCase ( self : int , UpperCamelCase__ : str ): return self.sp_model.PieceToId(UpperCamelCase__ ) def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : List[Any] ): return self.sp_model.IdToPiece(UpperCamelCase__ ) def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : str ): A__ : Optional[int] ="".join(UpperCamelCase__ ).replace(UpperCamelCase__ , " " ).strip() return out_string def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ): A__ : List[str] =[self.sep_token_id] A__ : str =[self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ ) if token_ids_a is not None: return ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) + [1, 1] return ([0] * len(UpperCamelCase__ )) + [1, 1] def _UpperCAmelCase ( self : int , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ): A__ : List[str] =[self.sep_token_id] A__ : Optional[Any] =[2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : str , 
UpperCamelCase__ : Optional[str] = None ): if not os.path.isdir(UpperCamelCase__ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return A__ : Tuple =os.path.join( UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCamelCase__ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCamelCase__ , "wb" ) as fi: A__ : Optional[Any] =self.sp_model.serialized_model_proto() fi.write(UpperCamelCase__ ) return (out_vocab_file,) def _UpperCAmelCase ( self : str , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : int ): A__ : List[Any] =super()._decode(*UpperCamelCase__ , **UpperCamelCase__ ) A__ : Union[str, Any] =text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" ) return text
656
0
"""Fast (Rust-backed) tokenization class for BigBird."""
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    },
    "tokenizer_file": {
        "google/bigbird-roberta-base": (
            "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
        ),
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}

SPIECE_UNDERLINE = "▁"


class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    """Fast BigBird tokenizer backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # Saving the slow vocabulary is only possible when a sentencepiece file was provided.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs: ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: all 0 for a single sequence, 0s then 1s for a pair."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece vocabulary file into ``save_directory``."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
452
"""simple docstring""" def lowercase ( UpperCamelCase : int , UpperCamelCase : list[int] , UpperCamelCase : int ): """simple docstring""" def count_of_possible_combinations(UpperCamelCase : int ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(UpperCamelCase ) def lowercase ( UpperCamelCase : int , UpperCamelCase : list[int] , UpperCamelCase : int ): """simple docstring""" def count_of_possible_combinations_with_dp_array( UpperCamelCase : int , UpperCamelCase : list[int] ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] A__ : str =sum( count_of_possible_combinations_with_dp_array(target - item , UpperCamelCase ) for item in array ) A__ : List[str] =answer return answer A__ : List[Any] =[-1] * (target + 1) return count_of_possible_combinations_with_dp_array(UpperCamelCase , UpperCamelCase ) def lowercase ( UpperCamelCase : int , UpperCamelCase : list[int] , UpperCamelCase : int ): """simple docstring""" A__ : str =[0] * (target + 1) A__ : Optional[Any] =1 for i in range(1 , target + 1 ): for j in range(UpperCamelCase ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() __A : Optional[Any] = 3 __A : Optional[Any] = 5 __A : int = [1, 2, 5] print(combination_sum_iv(n, array, target))
656
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __magic_name__ : Union[str, Any] = { "configuration_clap": [ "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST", "ClapAudioConfig", "ClapConfig", "ClapTextConfig", ], "processing_clap": ["ClapProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ : str = [ "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST", "ClapModel", "ClapPreTrainedModel", "ClapTextModel", "ClapTextModelWithProjection", "ClapAudioModel", "ClapAudioModelWithProjection", ] __magic_name__ : Union[str, Any] = ["ClapFeatureExtractor"] if TYPE_CHECKING: from .configuration_clap import ( CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, ClapAudioConfig, ClapConfig, ClapTextConfig, ) from .processing_clap import ClapProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clap import ClapFeatureExtractor from .modeling_clap import ( CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, ClapAudioModel, ClapAudioModelWithProjection, ClapModel, ClapPreTrainedModel, ClapTextModel, ClapTextModelWithProjection, ) else: import sys __magic_name__ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
281
"""simple docstring""" import math import tensorflow as tf from packaging import version def lowercase ( UpperCamelCase : Optional[Any] ): """simple docstring""" A__ : List[Any] =tf.convert_to_tensor(UpperCamelCase ) A__ : List[Any] =0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) )) return x * cdf def lowercase ( UpperCamelCase : Optional[int] ): """simple docstring""" A__ : Optional[Any] =tf.convert_to_tensor(UpperCamelCase ) A__ : Tuple =tf.cast(math.pi , x.dtype ) A__ : Dict =tf.cast(0.04_47_15 , x.dtype ) A__ : Optional[int] =0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(UpperCamelCase , 3 )) )) return x * cdf def lowercase ( UpperCamelCase : Optional[int] ): """simple docstring""" A__ : List[str] =tf.convert_to_tensor(UpperCamelCase ) return x * tf.tanh(tf.math.softplus(UpperCamelCase ) ) def lowercase ( UpperCamelCase : List[str] ): """simple docstring""" A__ : Union[str, Any] =tf.convert_to_tensor(UpperCamelCase ) A__ : List[Any] =tf.cast(0.04_47_15 , x.dtype ) A__ : List[Any] =tf.cast(0.79_78_84_56_08 , x.dtype ) return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) )) def lowercase ( UpperCamelCase : List[Any] ): """simple docstring""" A__ : List[str] =tf.convert_to_tensor(UpperCamelCase ) A__ : str =tf.cast(1.7_02 , x.dtype ) return x * tf.math.sigmoid(coeff * x ) def lowercase ( UpperCamelCase : Tuple ): """simple docstring""" return tf.clip_by_value(_gelu(UpperCamelCase ) , -10 , 10 ) def lowercase ( UpperCamelCase : str , UpperCamelCase : Any=-1 ): """simple docstring""" A__ , A__ : Optional[Any] =tf.split(UpperCamelCase , 2 , axis=UpperCamelCase ) return a * tf.math.sigmoid(UpperCamelCase ) if version.parse(tf.version.VERSION) >= version.parse("2.4"): def lowercase ( UpperCamelCase : int ): """simple docstring""" return tf.keras.activations.gelu(UpperCamelCase , approximate=UpperCamelCase ) __A : Optional[Any] = tf.keras.activations.gelu __A : Optional[Any] = approximate_gelu_wrap else: __A : Any = _gelu 
__A : Union[str, Any] = _gelu_new __A : List[str] = { "gelu": gelu, "gelu_10": gelu_aa, "gelu_fast": gelu_fast, "gelu_new": gelu_new, "glu": glu, "mish": mish, "quick_gelu": quick_gelu, "relu": tf.keras.activations.relu, "sigmoid": tf.keras.activations.sigmoid, "silu": tf.keras.activations.swish, "swish": tf.keras.activations.swish, "tanh": tf.keras.activations.tanh, } def lowercase ( UpperCamelCase : List[Any] ): """simple docstring""" if activation_string in ACTaFN: return ACTaFN[activation_string] else: raise KeyError(F'''function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}''' )
656
0
import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() _UpperCamelCase : int =logging.get_logger(__name__) def a__ (__lowercase :Any ) -> Dict: _A : str = OrderedDict() for key, value in state_dict.items(): if key.startswith('''module.encoder''' ): _A : Dict = key.replace('''module.encoder''' , '''glpn.encoder''' ) if key.startswith('''module.decoder''' ): _A : Optional[int] = key.replace('''module.decoder''' , '''decoder.stages''' ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 _A : Tuple = key[key.find('''patch_embed''' ) + len('''patch_embed''' )] _A : Optional[Any] = key.replace(f"""patch_embed{idx}""" , f"""patch_embeddings.{int(__lowercase )-1}""" ) if "norm" in key: _A : Dict = key.replace('''norm''' , '''layer_norm''' ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 _A : Any = key[key.find('''glpn.encoder.layer_norm''' ) + len('''glpn.encoder.layer_norm''' )] _A : Tuple = key.replace(f"""layer_norm{idx}""" , f"""layer_norm.{int(__lowercase )-1}""" ) if "layer_norm1" in key: _A : List[Any] = key.replace('''layer_norm1''' , '''layer_norm_1''' ) if "layer_norm2" in key: _A : Optional[int] = key.replace('''layer_norm2''' , '''layer_norm_2''' ) if "block" in key: # replace for example block1 by block.0 _A : int = key[key.find('''block''' ) + len('''block''' )] _A : Optional[Any] = key.replace(f"""block{idx}""" , f"""block.{int(__lowercase )-1}""" ) if "attn.q" in key: _A : Optional[Any] = key.replace('''attn.q''' , '''attention.self.query''' ) if "attn.proj" in key: _A : Union[str, Any] = key.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in key: _A : str = key.replace('''attn''' , '''attention.self''' ) if "fc1" in key: _A : Dict = 
key.replace('''fc1''' , '''dense1''' ) if "fc2" in key: _A : str = key.replace('''fc2''' , '''dense2''' ) if "linear_pred" in key: _A : List[Any] = key.replace('''linear_pred''' , '''classifier''' ) if "linear_fuse" in key: _A : List[str] = key.replace('''linear_fuse.conv''' , '''linear_fuse''' ) _A : Any = key.replace('''linear_fuse.bn''' , '''batch_norm''' ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 _A : str = key[key.find('''linear_c''' ) + len('''linear_c''' )] _A : Dict = key.replace(f"""linear_c{idx}""" , f"""linear_c.{int(__lowercase )-1}""" ) if "bot_conv" in key: _A : Union[str, Any] = key.replace('''bot_conv''' , '''0.convolution''' ) if "skip_conv1" in key: _A : List[Any] = key.replace('''skip_conv1''' , '''1.convolution''' ) if "skip_conv2" in key: _A : int = key.replace('''skip_conv2''' , '''2.convolution''' ) if "fusion1" in key: _A : Optional[Any] = key.replace('''fusion1''' , '''1.fusion''' ) if "fusion2" in key: _A : Optional[Any] = key.replace('''fusion2''' , '''2.fusion''' ) if "fusion3" in key: _A : int = key.replace('''fusion3''' , '''3.fusion''' ) if "fusion" in key and "conv" in key: _A : List[str] = key.replace('''conv''' , '''convolutional_layer''' ) if key.startswith('''module.last_layer_depth''' ): _A : Tuple = key.replace('''module.last_layer_depth''' , '''head.head''' ) _A : int = value return new_state_dict def a__ (__lowercase :Union[str, Any] , __lowercase :Dict ) -> List[str]: # for each of the encoder blocks: for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) _A : int = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" ) _A : str = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" ) # next, add keys and values (in that order) to the state dict _A : List[str] = kv_weight[ : config.hidden_sizes[i], : ] _A : Dict = kv_bias[: 
config.hidden_sizes[i]] _A : Any = kv_weight[ config.hidden_sizes[i] :, : ] _A : Any = kv_bias[config.hidden_sizes[i] :] def a__ () -> str: _A : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" _A : List[Any] = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ) return image @torch.no_grad() def a__ (__lowercase :str , __lowercase :Tuple , __lowercase :List[str]=False , __lowercase :str=None ) -> str: _A : List[str] = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] ) # load image processor (only resize + rescale) _A : str = GLPNImageProcessor() # prepare image _A : Any = prepare_img() _A : Optional[int] = image_processor(images=__lowercase , return_tensors='''pt''' ).pixel_values logger.info('''Converting model...''' ) # load original state dict _A : int = torch.load(__lowercase , map_location=torch.device('''cpu''' ) ) # rename keys _A : Union[str, Any] = rename_keys(__lowercase ) # key and value matrices need special treatment read_in_k_v(__lowercase , __lowercase ) # create HuggingFace model and load state dict _A : Optional[int] = GLPNForDepthEstimation(__lowercase ) model.load_state_dict(__lowercase ) model.eval() # forward pass _A : int = model(__lowercase ) _A : Optional[Any] = outputs.predicted_depth # verify output if model_name is not None: if "nyu" in model_name: _A : List[Any] = torch.tensor( [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] ) elif "kitti" in model_name: _A : Tuple = torch.tensor( [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] ) else: raise ValueError(f"""Unknown model name: {model_name}""" ) _A : str = torch.Size([1, 480, 640] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3] , __lowercase , atol=1e-4 ) print('''Looks ok!''' ) # finally, push to hub if required if push_to_hub: logger.info('''Pushing model and image processor to the hub...''' ) 
model.push_to_hub( repo_path_or_name=Path(__lowercase , __lowercase ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=__lowercase , ) image_processor.push_to_hub( repo_path_or_name=Path(__lowercase , __lowercase ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=__lowercase , ) if __name__ == "__main__": _UpperCamelCase : List[str] =argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.' ) parser.add_argument( '--model_name', default='glpn-kitti', type=str, help='Name of the model in case you\'re pushing to the hub.', ) _UpperCamelCase : Any =parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
206
"""simple docstring""" import inspect import unittest from transformers import SegformerConfig, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_MAPPING, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerModel, ) from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import SegformerImageProcessor class __lowerCAmelCase ( _UpperCamelCase): '''simple docstring''' def _UpperCAmelCase ( self : Dict ): A__ : Optional[Any] =self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(UpperCamelCase__ , "hidden_sizes" ) ) self.parent.assertTrue(hasattr(UpperCamelCase__ , "num_attention_heads" ) ) self.parent.assertTrue(hasattr(UpperCamelCase__ , "num_encoder_blocks" ) ) class __lowerCAmelCase : '''simple docstring''' def __init__( self : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any]=13 , UpperCamelCase__ : Tuple=64 , UpperCamelCase__ : Optional[int]=3 , UpperCamelCase__ : Union[str, Any]=4 , UpperCamelCase__ : Dict=[2, 2, 2, 2] , UpperCamelCase__ : Union[str, Any]=[8, 4, 2, 1] , UpperCamelCase__ : Tuple=[16, 32, 64, 128] , UpperCamelCase__ : Optional[int]=[1, 4, 8, 16] , UpperCamelCase__ : Any=[1, 2, 4, 8] , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Dict="gelu" , UpperCamelCase__ : str=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : List[str]=0.02 , UpperCamelCase__ : int=3 , UpperCamelCase__ : Optional[Any]=None , ): A__ : Tuple =parent A__ : List[Any] =batch_size A__ : 
List[Any] =image_size A__ : Union[str, Any] =num_channels A__ : Optional[int] =num_encoder_blocks A__ : Any =sr_ratios A__ : Any =depths A__ : List[Any] =hidden_sizes A__ : List[Any] =downsampling_rates A__ : List[str] =num_attention_heads A__ : int =is_training A__ : List[Any] =use_labels A__ : Any =hidden_act A__ : Dict =hidden_dropout_prob A__ : int =attention_probs_dropout_prob A__ : List[Any] =initializer_range A__ : Tuple =num_labels A__ : List[Any] =scope def _UpperCAmelCase ( self : Optional[int] ): A__ : List[str] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A__ : Any =None if self.use_labels: A__ : Tuple =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) A__ : List[Any] =self.get_config() return config, pixel_values, labels def _UpperCAmelCase ( self : Tuple ): return SegformerConfig( image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int ): A__ : Any =SegformerModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() A__ : Dict =model(UpperCamelCase__ ) A__ : Optional[int] =self.image_size // (self.downsampling_rates[-1] * 2) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) ) def _UpperCAmelCase ( self : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] ): A__ : str =self.num_labels A__ : Optional[Any] =SegformerForSemanticSegmentation(UpperCamelCase__ ) model.to(UpperCamelCase__ ) 
model.eval() A__ : Optional[Any] =model(UpperCamelCase__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) A__ : List[Any] =model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) self.parent.assertGreater(result.loss , 0.0 ) def _UpperCAmelCase ( self : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str ): A__ : Tuple =1 A__ : Tuple =SegformerForSemanticSegmentation(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() A__ : List[str] =torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(UpperCamelCase__ ) A__ : Dict =model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertGreater(result.loss , 0.0 ) def _UpperCAmelCase ( self : str ): A__ : Union[str, Any] =self.prepare_config_and_inputs() A__ , A__ , A__ : Tuple =config_and_inputs A__ : Tuple ={"pixel_values": pixel_values} return config, inputs_dict @require_torch class __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase): '''simple docstring''' __magic_name__ : Dict = ( ( SegformerModel, SegformerForSemanticSegmentation, SegformerForImageClassification, ) if is_torch_available() else () ) __magic_name__ : Optional[int] = ( { """feature-extraction""": SegformerModel, """image-classification""": SegformerForImageClassification, """image-segmentation""": SegformerForSemanticSegmentation, } if is_torch_available() else {} ) __magic_name__ : Dict = True __magic_name__ : List[str] = False __magic_name__ : Optional[Any] = False __magic_name__ : str = False def _UpperCAmelCase ( self : Union[str, Any] ): A__ : Union[str, Any] =SegformerModelTester(self ) A__ : Tuple =SegformerConfigTester(self , config_class=UpperCamelCase__ ) def _UpperCAmelCase ( self : str ): self.config_tester.run_common_tests() def 
_UpperCAmelCase ( self : Dict ): A__ : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def _UpperCAmelCase ( self : Tuple ): A__ : int =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_binary_image_segmentation(*UpperCamelCase__ ) def _UpperCAmelCase ( self : Union[str, Any] ): A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_segmentation(*UpperCamelCase__ ) @unittest.skip("SegFormer does not use inputs_embeds" ) def _UpperCAmelCase ( self : Dict ): pass @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods" ) def _UpperCAmelCase ( self : Tuple ): pass def _UpperCAmelCase ( self : List[str] ): A__ , A__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ : int =model_class(UpperCamelCase__ ) A__ : Optional[int] =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ : Optional[int] =[*signature.parameters.keys()] A__ : List[str] =["pixel_values"] self.assertListEqual(arg_names[:1] , UpperCamelCase__ ) def _UpperCAmelCase ( self : str ): A__ , A__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common() A__ : Union[str, Any] =True for model_class in self.all_model_classes: A__ : Optional[Any] =True A__ : Union[str, Any] =False A__ : str =True A__ : Optional[int] =model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() with torch.no_grad(): A__ : str =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) A__ : Any =outputs.attentions A__ : List[str] =sum(self.model_tester.depths ) self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ ) # check that output_attentions also work using config del inputs_dict["output_attentions"] A__ : Dict =True A__ : str =model_class(UpperCamelCase__ ) 
model.to(UpperCamelCase__ ) model.eval() with torch.no_grad(): A__ : Any =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) A__ : Union[str, Any] =outputs.attentions self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ ) # verify the first attentions (first block, first layer) A__ : List[Any] =(self.model_tester.image_size // 4) ** 2 A__ : Tuple =(self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) # verify the last attentions (last block, last layer) A__ : Tuple =(self.model_tester.image_size // 32) ** 2 A__ : Optional[Any] =(self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2 self.assertListEqual( list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , ) A__ : int =len(UpperCamelCase__ ) # Check attention is always last and order is fine A__ : Optional[Any] =True A__ : Any =True A__ : Union[str, Any] =model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() with torch.no_grad(): A__ : Optional[Any] =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) self.assertEqual(out_len + 1 , len(UpperCamelCase__ ) ) A__ : Optional[Any] =outputs.attentions self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ ) # verify the first attentions (first block, first layer) A__ : Union[str, Any] =(self.model_tester.image_size // 4) ** 2 A__ : Tuple =(self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) def _UpperCAmelCase ( self : List[Any] ): def check_hidden_states_output(UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple ): A__ : Optional[Any] 
=model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() with torch.no_grad(): A__ : List[Any] =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) A__ : Optional[Any] =outputs.hidden_states A__ : int =self.model_tester.num_encoder_blocks self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.hidden_sizes[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) A__ , A__ : List[str] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ : Optional[Any] =True check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A__ : str =True check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def _UpperCAmelCase ( self : Optional[int] ): if not self.model_tester.is_training: return A__ , A__ : int =self.model_tester.prepare_config_and_inputs_for_common() A__ : List[Any] =True for model_class in self.all_model_classes: if model_class in get_values(UpperCamelCase__ ): continue A__ : List[Any] =model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.train() A__ : int =self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) A__ : Union[str, Any] =model(**UpperCamelCase__ ).loss loss.backward() @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." 
) def _UpperCAmelCase ( self : Tuple ): pass @slow def _UpperCAmelCase ( self : Tuple ): for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ : Tuple =SegformerModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def lowercase ( ): """simple docstring""" A__ : List[Any] =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch class __lowerCAmelCase ( unittest.TestCase): '''simple docstring''' @slow def _UpperCAmelCase ( self : Tuple ): # only resize + normalize A__ : List[Any] =SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=UpperCamelCase__ , align=UpperCamelCase__ , do_random_crop=UpperCamelCase__ ) A__ : Union[str, Any] =SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to( UpperCamelCase__ ) A__ : Union[str, Any] =prepare_img() A__ : Union[str, Any] =image_processor(images=UpperCamelCase__ , return_tensors="pt" ) A__ : int =encoded_inputs.pixel_values.to(UpperCamelCase__ ) with torch.no_grad(): A__ : int =model(UpperCamelCase__ ) A__ : Dict =torch.Size((1, model.config.num_labels, 128, 128) ) self.assertEqual(outputs.logits.shape , UpperCamelCase__ ) A__ : Optional[int] =torch.tensor( [ [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]], [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]], [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]], ] ).to(UpperCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1E-4 ) ) @slow def _UpperCAmelCase ( self : Union[str, Any] ): # only resize + normalize A__ : Dict =SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=UpperCamelCase__ , align=UpperCamelCase__ , do_random_crop=UpperCamelCase__ ) A__ : int =SegformerForSemanticSegmentation.from_pretrained( "nvidia/segformer-b1-finetuned-cityscapes-1024-1024" 
).to(UpperCamelCase__ ) A__ : Tuple =prepare_img() A__ : str =image_processor(images=UpperCamelCase__ , return_tensors="pt" ) A__ : Optional[int] =encoded_inputs.pixel_values.to(UpperCamelCase__ ) with torch.no_grad(): A__ : int =model(UpperCamelCase__ ) A__ : List[str] =torch.Size((1, model.config.num_labels, 128, 128) ) self.assertEqual(outputs.logits.shape , UpperCamelCase__ ) A__ : List[Any] =torch.tensor( [ [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]], [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]], [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]], ] ).to(UpperCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1E-1 ) ) @slow def _UpperCAmelCase ( self : int ): # only resize + normalize A__ : Optional[Any] =SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=UpperCamelCase__ , align=UpperCamelCase__ , do_random_crop=UpperCamelCase__ ) A__ : List[Any] =SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to( UpperCamelCase__ ) A__ : str =prepare_img() A__ : Dict =image_processor(images=UpperCamelCase__ , return_tensors="pt" ) A__ : Any =encoded_inputs.pixel_values.to(UpperCamelCase__ ) with torch.no_grad(): A__ : Dict =model(UpperCamelCase__ ) A__ : Any =outputs.logits.detach().cpu() A__ : Union[str, Any] =image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ , target_sizes=[(500, 300)] ) A__ : List[str] =torch.Size((500, 300) ) self.assertEqual(segmentation[0].shape , UpperCamelCase__ ) A__ : int =image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ ) A__ : Tuple =torch.Size((128, 128) ) self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
656
0
"""simple docstring""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json", # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class __magic_name__ ( _UpperCamelCase ): _SCREAMING_SNAKE_CASE : Union[str, Any] = """gpt_neo""" _SCREAMING_SNAKE_CASE : Union[str, Any] = ["""past_key_values"""] _SCREAMING_SNAKE_CASE : Dict = {"""num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""} def __init__( self : Dict , snake_case_ : List[Any]=50257 , snake_case_ : Optional[Any]=2048 , snake_case_ : Tuple=2048 , snake_case_ : int=24 , snake_case_ : Dict=[[["global", "local"], 12]] , snake_case_ : Optional[Any]=16 , snake_case_ : Optional[Any]=None , snake_case_ : str=256 , snake_case_ : List[str]="gelu_new" , snake_case_ : List[str]=0.0 , snake_case_ : Tuple=0.0 , snake_case_ : str=0.0 , snake_case_ : List[str]=0.1 , snake_case_ : List[str]=1e-5 , snake_case_ : Any=0.02 , snake_case_ : Tuple=True , snake_case_ : Optional[Any]=50256 , snake_case_ : List[str]=50256 , **snake_case_ : str , ): __snake_case = vocab_size __snake_case = max_position_embeddings __snake_case = hidden_size __snake_case = num_layers __snake_case = num_heads __snake_case = intermediate_size __snake_case = window_size __snake_case = activation_function __snake_case = resid_dropout __snake_case = embed_dropout __snake_case = attention_dropout __snake_case = classifier_dropout __snake_case = layer_norm_epsilon __snake_case = initializer_range __snake_case = use_cache __snake_case = bos_token_id __snake_case = eos_token_id __snake_case = attention_types __snake_case = 
self.expand_attention_types_params(UpperCamelCase__ ) if len(self.attention_layers ) != self.num_layers: raise ValueError( "Configuration for convolutional module is incorrect. " "It is required that `len(config.attention_layers)` == `config.num_layers` " F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, ''' F'''`config.num_layers = {self.num_layers}`. ''' "`config.attention_layers` is prepared using `config.attention_types`. " "Please verify the value of `config.attention_types` argument." ) super().__init__(bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ ) @staticmethod def lowerCAmelCase ( snake_case_ : List[str] ): __snake_case = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" import torch __snake_case = input.size() __snake_case = len(SCREAMING_SNAKE_CASE ) __snake_case = shape[dimension] __snake_case = torch.arange(0 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) __snake_case = torch.div(sizedim - size , SCREAMING_SNAKE_CASE , rounding_mode="floor" ) + 1 __snake_case = torch.arange(SCREAMING_SNAKE_CASE ) + low_indices[:min_length][:, None] __snake_case = [slice(SCREAMING_SNAKE_CASE )] * rank __snake_case = indices __snake_case = input[s] __snake_case = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(SCREAMING_SNAKE_CASE ) def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" import torch __snake_case = torch.arange(1 , SCREAMING_SNAKE_CASE ) __snake_case = torch.remainder(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) __snake_case = remainders == 0 __snake_case = candidates[divisor_indices] __snake_case = torch.max(SCREAMING_SNAKE_CASE ) return largest_divisor, torch.div(SCREAMING_SNAKE_CASE , 
SCREAMING_SNAKE_CASE , rounding_mode="floor" ) class __magic_name__ ( _UpperCamelCase ): @property def lowerCAmelCase ( self : List[Any] ): __snake_case = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} ) if self.use_past: self.fill_with_past_key_values_(UpperCamelCase__ , direction="inputs" ) __snake_case = {0: "batch", 1: "past_sequence + sequence"} else: __snake_case = {0: "batch", 1: "sequence"} return common_inputs @property def lowerCAmelCase ( self : List[str] ): return self._config.num_heads def lowerCAmelCase ( self : int , snake_case_ : PreTrainedTokenizer , snake_case_ : int = -1 , snake_case_ : int = -1 , snake_case_ : bool = False , snake_case_ : Optional[TensorType] = None , ): __snake_case = super(UpperCamelCase__ , self ).generate_dummy_inputs( UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ ) # We need to order the input in the way they appears in the forward() __snake_case = OrderedDict({"input_ids": common_inputs["input_ids"]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch __snake_case = common_inputs["input_ids"].shape # Not using the same length for past_key_values __snake_case = seqlen + 2 __snake_case = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) __snake_case = [ (torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) for _ in range(self.num_layers ) ] __snake_case = common_inputs["attention_mask"] if self.use_past: __snake_case = ordered_inputs["attention_mask"].dtype __snake_case = torch.cat( [ordered_inputs["attention_mask"], torch.ones(UpperCamelCase__ , UpperCamelCase__ , dtype=UpperCamelCase__ )] , dim=1 ) return ordered_inputs @property def lowerCAmelCase ( self : List[str] ): return 13
163
"""Tests for the Flax RobertaPreLayerNorm models.

Reconstructed from a machine-mangled dump: the tester class was defined under a
scrambled name (`__lowerCAmelCase`) yet instantiated as
`FlaxRobertaPreLayerNormModelTester`, the model-test class inherited from the
undefined name `_UpperCamelCase` (intended: `FlaxModelTesterMixin`), and the
`__init__` signature repeated the same parameter name (a SyntaxError).
Identifiers are restored so the module is importable and self-consistent.
"""
import unittest

import numpy as np

from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
        FlaxRobertaPreLayerNormForCausalLM,
        FlaxRobertaPreLayerNormForMaskedLM,
        FlaxRobertaPreLayerNormForMultipleChoice,
        FlaxRobertaPreLayerNormForQuestionAnswering,
        FlaxRobertaPreLayerNormForSequenceClassification,
        FlaxRobertaPreLayerNormForTokenClassification,
        FlaxRobertaPreLayerNormModel,
    )


class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    """Builds tiny configs and random inputs shared by the model tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, attention_mask) for a tiny model."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Adapter for `FlaxModelTesterMixin`: returns (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        """Like `prepare_config_and_inputs` but configured as a decoder with encoder states."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Smoke-test loading the PyTorch checkpoint into every Flax head.
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]],
            dtype=np.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]],
            dtype=np.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
656
0
"""CLIP-style image processor.

Reconstructed from a machine-mangled dump: every method signature repeated the
same parameter name `_lowercase` (a SyntaxError) while the bodies referenced
the undefined name `UpperCamelCase__`; the class and base-class names were
scrambled. Identifiers are restored so signatures and bodies agree.
"""
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


logger = logging.get_logger(__name__)


if is_vision_available():
    import PIL


class CLIPImageProcessor(BaseImageProcessor):
    r"""
    Constructs a CLIP image processor: optional RGB conversion, shortest-edge
    resize, center crop, rescale to [0, 1] and normalization with the OpenAI
    CLIP mean/std.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Default: resize so the shortest edge is 224, then crop to 224x224.
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image` so its shortest edge equals `size["shortest_edge"]`, keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop `image` to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize `image` with per-channel `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Apply the configured transforms to one image or a batch and return a `BatchFeature`.

        Per-call arguments override the instance defaults set in `__init__`.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
434
"""Convert VisualBERT research checkpoints to the Hugging Face format.

Reconstructed from a machine-mangled dump in which both module-level lists were
bound to `__A` (the second overwriting the first), the default argument and the
assertion referenced the now-undefined names `rename_keys_prefix` and
`ACCEPTABLE_CHECKPOINTS`, and both helpers were named `lowercase` while the
main guard called the undefined `convert_visual_bert_checkpoint`.
"""
import argparse
from collections import OrderedDict
from pathlib import Path

import torch

from transformers import (
    VisualBertConfig,
    VisualBertForMultipleChoice,
    VisualBertForPreTraining,
    VisualBertForQuestionAnswering,
    VisualBertForVisualReasoning,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# (old_prefix, new_prefix) pairs applied in order to every state-dict key.
rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]

ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]


def load_state_dict(checkpoint_path):
    """Load a raw torch state dict from disk onto CPU."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    """Return a renamed copy of state dict `d`, dropping detector weights.

    Also injects `position_ids` (expected by the HF model) and, for old BERT
    checkpoints, a `decoder.bias` tied to `cls.predictions.bias`.
    """
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d


@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """Convert `checkpoint_path` (an original .th file) and save the HF model.

    The checkpoint filename selects the config (visual embedding dim, labels)
    and the model head to instantiate.
    """
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
656
0
def __a ( A__ : int ): if num <= 0: raise ValueError("Input must be a positive integer" ) SCREAMING_SNAKE_CASE = [True] * (num + 1) SCREAMING_SNAKE_CASE = 2 while p * p <= num: if primes[p]: for i in range(p * p , num + 1 , A__ ): SCREAMING_SNAKE_CASE = False p += 1 return [prime for prime in range(2 , num + 1 ) if primes[prime]] if __name__ == "__main__": import doctest doctest.testmod() __A : Optional[int] = int(input('Enter a positive integer: ').strip()) print(prime_sieve_eratosthenes(user_num))
16
"""simple docstring""" __A : Union[str, Any] = {str(digit): digit**5 for digit in range(10)} def lowercase ( UpperCamelCase : int ): """simple docstring""" return sum(DIGITS_FIFTH_POWER[digit] for digit in str(UpperCamelCase ) ) def lowercase ( ): """simple docstring""" return sum( number for number in range(1000 , 1000000 ) if number == digits_fifth_powers_sum(UpperCamelCase ) ) if __name__ == "__main__": print(solution())
656
0
from __future__ import annotations snake_case_ : Optional[int] = { "A": ["B", "C", "E"], "B": ["A", "D", "E"], "C": ["A", "F", "G"], "D": ["B"], "E": ["A", "B", "D"], "F": ["C"], "G": ["C"], } class A__ : def __init__( self : str , _a : dict[str, list[str]] , _a : str ) -> Any: """simple docstring""" _SCREAMING_SNAKE_CASE =graph # mapping node to its parent in resulting breadth first tree _SCREAMING_SNAKE_CASE ={} _SCREAMING_SNAKE_CASE =source_vertex def __UpperCamelCase ( self : int ) -> Optional[Any]: """simple docstring""" _SCREAMING_SNAKE_CASE ={self.source_vertex} _SCREAMING_SNAKE_CASE =None _SCREAMING_SNAKE_CASE =[self.source_vertex] # first in first out queue while queue: _SCREAMING_SNAKE_CASE =queue.pop(0 ) for adjacent_vertex in self.graph[vertex]: if adjacent_vertex not in visited: visited.add(UpperCamelCase__ ) _SCREAMING_SNAKE_CASE =vertex queue.append(UpperCamelCase__ ) def __UpperCamelCase ( self : int , _a : str ) -> List[Any]: """simple docstring""" if target_vertex == self.source_vertex: return self.source_vertex _SCREAMING_SNAKE_CASE =self.parent.get(UpperCamelCase__ ) if target_vertex_parent is None: _SCREAMING_SNAKE_CASE =( f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}" ) raise ValueError(UpperCamelCase__ ) return self.shortest_path(UpperCamelCase__ ) + f"->{target_vertex}" if __name__ == "__main__": snake_case_ : int = Graph(graph, '''G''') g.breath_first_search() print(g.shortest_path('''D''')) print(g.shortest_path('''G''')) print(g.shortest_path('''Foo'''))
691
"""PyTorch PoolFormer model.

Reconstructed from a machine-mangled dump: the original chunk referenced
unresolvable names (`ACTaFN`, `nn.Convad`, `nn.AvgPoolad`, scrambled class
names, undefined base `_UpperCamelCase`, `floataa`) which made the module
un-importable. Identifiers are restored to the upstream implementation's
names; logic is otherwise unchanged.
"""
import collections.abc
from typing import Optional

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]


def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """Drop whole residual paths per sample (stochastic depth).

    A no-op when `drop_prob` is 0 or at eval time; otherwise zeroes each
    sample's path with probability `drop_prob` and rescales survivors.
    """
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    # One random value per sample, broadcast over remaining dims (works with
    # diff dim tensors, not just 2D ConvNets).
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output


class PoolFormerDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample, for the main path of residual blocks."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)


class PoolFormerEmbeddings(nn.Module):
    """Patch embeddings: a strided convolution plus an optional norm layer."""

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        # Accept either an int or an iterable (height, width) for each size arg.
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings


class PoolFormerGroupNorm(nn.GroupNorm):
    """GroupNorm with a single group, i.e. LayerNorm over the channel dim of NCHW tensors."""

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)


class PoolFormerPooling(nn.Module):
    """Token mixing by average pooling; subtracts the input so it acts as a residual delta."""

    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        return self.pool(hidden_states) - hidden_states


class PoolFormerOutput(nn.Module):
    """Channel MLP implemented with 1x1 convolutions, with dropout around the activation."""

    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states


class PoolFormerLayer(nn.Module):
    """One PoolFormer block: pooling mixer + channel MLP, each with a residual connection."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            # Learnable per-channel residual scaling (LayerScale).
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs


class PoolFormerEncoder(nn.Module):
    """Stack of patch-embedding stages, each followed by its PoolFormer blocks."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule: depth-proportional drop-path rates.
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))

        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)


class PoolFormerPreTrainedModel(PreTrainedModel):
    """Base class handling weight init and a simple config/pretrained interface."""

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize weights: truncated-normal-style for linear/conv, unit LayerNorm."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value


POOLFORMER_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

POOLFORMER_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`PoolFormerImageProcessor.__call__`] for details.
"""


@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )


class PoolFormerFinalPooler(nn.Module):
    """Simple linear projection applied to the final hidden states."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output


@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])
        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        """Classify an image; `labels` selects regression/single/multi-label loss automatically."""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        # Norm then global average pool over the spatial dims before the head.
        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
656
0
def a_ ( __lowercase : int , __lowercase : int ) -> str: if a < 0 or b < 0: raise ValueError('the value of both inputs must be positive' ) _snake_case = str(bin(__lowercase ) )[2:] # remove the leading "0b" _snake_case = str(bin(__lowercase ) )[2:] # remove the leading "0b" _snake_case = max(len(__lowercase ) , len(__lowercase ) ) return "0b" + "".join( str(int(char_a == '1' and char_b == '1' ) ) for char_a, char_b in zip(a_binary.zfill(__lowercase ) , b_binary.zfill(__lowercase ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
686
"""simple docstring""" import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase): '''simple docstring''' __magic_name__ : int = IFInpaintingSuperResolutionPipeline __magic_name__ : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""} __magic_name__ : str = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""}) __magic_name__ : Optional[Any] = PipelineTesterMixin.required_optional_params - {"""latents"""} def _UpperCAmelCase ( self : Union[str, Any] ): return self._get_superresolution_dummy_components() def _UpperCAmelCase ( self : Optional[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int]=0 ): if str(UpperCamelCase__ ).startswith("mps" ): A__ : Any =torch.manual_seed(UpperCamelCase__ ) else: A__ : Dict =torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ ) A__ : Tuple =floats_tensor((1, 3, 16, 16) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) A__ : Optional[int] =floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) A__ : Any =floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) A__ : List[str] ={ "prompt": "A painting of a squirrel eating a burger", "image": image, "original_image": original_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , 
reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def _UpperCAmelCase ( self : Dict ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def _UpperCAmelCase ( self : int ): self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" ) def _UpperCAmelCase ( self : Tuple ): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def _UpperCAmelCase ( self : str ): self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def _UpperCAmelCase ( self : Dict ): self._test_save_load_local() def _UpperCAmelCase ( self : Optional[int] ): self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
656
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging A : str = logging.get_logger(__name__) A : str = { "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json", # See all Cvt models at https://huggingface.co/models?filter=cvt } class _UpperCamelCase ( _UpperCamelCase ): '''simple docstring''' __UpperCAmelCase : List[str] ="""cvt""" def __init__( self , __a=3 , __a=[7, 3, 3] , __a=[4, 2, 2] , __a=[2, 1, 1] , __a=[64, 1_92, 3_84] , __a=[1, 3, 6] , __a=[1, 2, 10] , __a=[4.0, 4.0, 4.0] , __a=[0.0, 0.0, 0.0] , __a=[0.0, 0.0, 0.0] , __a=[0.0, 0.0, 0.1] , __a=[True, True, True] , __a=[False, False, True] , __a=["dw_bn", "dw_bn", "dw_bn"] , __a=[3, 3, 3] , __a=[1, 1, 1] , __a=[2, 2, 2] , __a=[1, 1, 1] , __a=[1, 1, 1] , __a=0.0_2 , __a=1e-1_2 , **__a , ): super().__init__(**UpperCamelCase__ ) __lowerCAmelCase = num_channels __lowerCAmelCase = patch_sizes __lowerCAmelCase = patch_stride __lowerCAmelCase = patch_padding __lowerCAmelCase = embed_dim __lowerCAmelCase = num_heads __lowerCAmelCase = depth __lowerCAmelCase = mlp_ratio __lowerCAmelCase = attention_drop_rate __lowerCAmelCase = drop_rate __lowerCAmelCase = drop_path_rate __lowerCAmelCase = qkv_bias __lowerCAmelCase = cls_token __lowerCAmelCase = qkv_projection_method __lowerCAmelCase = kernel_qkv __lowerCAmelCase = padding_kv __lowerCAmelCase = stride_kv __lowerCAmelCase = padding_q __lowerCAmelCase = stride_q __lowerCAmelCase = initializer_range __lowerCAmelCase = layer_norm_eps
636
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __A : Any = { "configuration_efficientformer": [ "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "EfficientFormerConfig", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Union[str, Any] = ["EfficientFormerImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Optional[int] = [ "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "EfficientFormerForImageClassification", "EfficientFormerForImageClassificationWithTeacher", "EfficientFormerModel", "EfficientFormerPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Optional[int] = [ "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFEfficientFormerForImageClassification", "TFEfficientFormerForImageClassificationWithTeacher", "TFEfficientFormerModel", "TFEfficientFormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientformer import EfficientFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientformer import ( EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientFormerForImageClassification, EfficientFormerForImageClassificationWithTeacher, EfficientFormerModel, EfficientFormerPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass 
else: from .modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, TFEfficientFormerPreTrainedModel, ) else: import sys __A : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
656
0
"""simple docstring""" import inspect import unittest from transformers import YolosConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import YolosForObjectDetection, YolosModel from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[int] ,A_ : str ,A_ : Optional[Any]=13 ,A_ : int=[30, 30] ,A_ : Optional[Any]=2 ,A_ : int=3 ,A_ : str=True ,A_ : Union[str, Any]=True ,A_ : List[str]=32 ,A_ : List[Any]=5 ,A_ : Union[str, Any]=4 ,A_ : List[Any]=37 ,A_ : Tuple="gelu" ,A_ : List[Any]=0.1 ,A_ : str=0.1 ,A_ : Tuple=10 ,A_ : Tuple=0.02 ,A_ : Any=3 ,A_ : str=None ,A_ : Tuple=8 ,A_ : Dict=10 ,) -> Optional[Any]: A = parent A = batch_size A = image_size A = patch_size A = num_channels A = is_training A = use_labels A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = type_sequence_label_size A = initializer_range A = num_labels A = scope A = n_targets A = num_detection_tokens # we set the expected sequence length (which is used in several tests) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens A = (image_size[1] // patch_size) * (image_size[0] // patch_size) A = num_patches + 1 + self.num_detection_tokens def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]: A = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], 
self.image_size[1]] ) A = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) A = [] for i in range(self.batch_size ): A = {} A = torch.randint( high=self.num_labels ,size=(self.n_targets,) ,device=UpperCamelCase__ ) A = torch.rand(self.n_targets ,4 ,device=UpperCamelCase__ ) labels.append(UpperCamelCase__ ) A = self.get_config() return config, pixel_values, labels def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]: return YolosConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=UpperCamelCase__ ,initializer_range=self.initializer_range ,num_detection_tokens=self.num_detection_tokens ,num_labels=self.num_labels ,) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : List[Any] ,A_ : Optional[int] ,A_ : List[str] ) -> Optional[int]: A = YolosModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() A = model(UpperCamelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape ,(self.batch_size, self.expected_seq_len, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Tuple ,A_ : str ,A_ : str ) -> Optional[Any]: A = YolosForObjectDetection(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() A = model(pixel_values=UpperCamelCase__ ) A = model(UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape ,(self.batch_size, self.num_detection_tokens, 4) ) A = model(pixel_values=UpperCamelCase__ ,labels=UpperCamelCase__ ) self.parent.assertEqual(result.loss.shape ,() ) 
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape ,(self.batch_size, self.num_detection_tokens, 4) ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: A = self.prepare_config_and_inputs() A = config_and_inputs A = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase_ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: List[str] = (YolosModel, YolosForObjectDetection) if is_torch_available() else () _lowerCamelCase: Any = ( {"""feature-extraction""": YolosModel, """object-detection""": YolosForObjectDetection} if is_torch_available() else {} ) _lowerCamelCase: Dict = False _lowerCamelCase: Optional[Any] = False _lowerCamelCase: Tuple = False _lowerCamelCase: Optional[int] = False def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : str ,A_ : Optional[Any]=False ) -> str: A = super()._prepare_for_class(UpperCamelCase__ ,UpperCamelCase__ ,return_labels=UpperCamelCase__ ) if return_labels: if model_class.__name__ == "YolosForObjectDetection": A = [] for i in range(self.model_tester.batch_size ): A = {} A = torch.ones( size=(self.model_tester.n_targets,) ,device=UpperCamelCase__ ,dtype=torch.long ) A = torch.ones( self.model_tester.n_targets ,4 ,device=UpperCamelCase__ ,dtype=torch.float ) labels.append(UpperCamelCase__ ) A = labels return inputs_dict def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]: A = YolosModelTester(self ) A = ConfigTester(self ,config_class=UpperCamelCase__ ,has_text_modality=UpperCamelCase__ ,hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]: # YOLOS does not use inputs_embeds pass def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict: A = self.model_tester.prepare_config_and_inputs_for_common() 
for model_class in self.all_model_classes: A = model_class(UpperCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) A = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase__ ,nn.Linear ) ) def _SCREAMING_SNAKE_CASE ( self : str ) -> int: A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(UpperCamelCase__ ) A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A = [*signature.parameters.keys()] A = ["pixel_values"] self.assertListEqual(arg_names[:1] ,UpperCamelCase__ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]: A = self.model_tester.prepare_config_and_inputs_for_common() A = True # in YOLOS, the seq_len is different A = self.model_tester.expected_seq_len for model_class in self.all_model_classes: A = True A = False A = True A = model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() with torch.no_grad(): A = model(**self._prepare_for_class(UpperCamelCase__ ,UpperCamelCase__ ) ) A = outputs.attentions self.assertEqual(len(UpperCamelCase__ ) ,self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] A = True A = model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() with torch.no_grad(): A = model(**self._prepare_for_class(UpperCamelCase__ ,UpperCamelCase__ ) ) A = outputs.attentions self.assertEqual(len(UpperCamelCase__ ) ,self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_len, seq_len] ,) A = len(UpperCamelCase__ ) # Check attention is always last and order is fine A = True A = True A = 
model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() with torch.no_grad(): A = model(**self._prepare_for_class(UpperCamelCase__ ,UpperCamelCase__ ) ) A = 1 self.assertEqual(out_len + added_hidden_states ,len(UpperCamelCase__ ) ) A = outputs.attentions self.assertEqual(len(UpperCamelCase__ ) ,self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_len, seq_len] ,) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]: def check_hidden_states_output(A_ : Optional[Any] ,A_ : List[Any] ,A_ : Tuple ): A = model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() with torch.no_grad(): A = model(**self._prepare_for_class(UpperCamelCase__ ,UpperCamelCase__ ) ) A = outputs.hidden_states A = getattr( self.model_tester ,'expected_num_hidden_layers' ,self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(UpperCamelCase__ ) ,UpperCamelCase__ ) # YOLOS has a different seq_length A = self.model_tester.expected_seq_len self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[seq_length, self.model_tester.hidden_size] ,) A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = True check_hidden_states_output(UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A = True check_hidden_states_output(UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_object_detection(*UpperCamelCase__ ) @slow def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]: for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A = YolosModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def _snake_case ( ): A = 
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str: return AutoImageProcessor.from_pretrained('hustvl/yolos-small' ) if is_vision_available() else None @slow def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict: A = YolosForObjectDetection.from_pretrained('hustvl/yolos-small' ).to(UpperCamelCase__ ) A = self.default_image_processor A = prepare_img() A = image_processor(images=UpperCamelCase__ ,return_tensors='pt' ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): A = model(inputs.pixel_values ) # verify outputs A = torch.Size((1, 100, 92) ) self.assertEqual(outputs.logits.shape ,UpperCamelCase__ ) A = torch.tensor( [[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] ,device=UpperCamelCase__ ,) A = torch.tensor( [[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]] ,device=UpperCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] ,UpperCamelCase__ ,atol=1e-4 ) ) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] ,UpperCamelCase__ ,atol=1e-4 ) ) # verify postprocessing A = image_processor.post_process_object_detection( UpperCamelCase__ ,threshold=0.3 ,target_sizes=[image.size[::-1]] )[0] A = torch.tensor([0.99_94, 0.97_90, 0.99_64, 0.99_72, 0.98_61] ).to(UpperCamelCase__ ) A = [75, 75, 17, 63, 17] A = torch.tensor([3_35.06_09, 79.38_48, 3_75.42_16, 1_87.24_95] ).to(UpperCamelCase__ ) self.assertEqual(len(results['scores'] ) ,5 ) self.assertTrue(torch.allclose(results['scores'] ,UpperCamelCase__ ,atol=1e-4 ) ) self.assertSequenceEqual(results['labels'].tolist() ,UpperCamelCase__ ) self.assertTrue(torch.allclose(results['boxes'][0, :] ,UpperCamelCase__ ) )
91
"""simple docstring""" import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def lowercase ( UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any]=10 ): """simple docstring""" A__ : Tuple =[] for _ in range(UpperCamelCase ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def lowercase ( UpperCamelCase : List[str] , UpperCamelCase : Union[str, Any]=10 ): """simple docstring""" A__ : Dict =[] for step in range(UpperCamelCase ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: A__ : List[Any] =os.path.join(UpperCamelCase , "schedule.bin" ) torch.save(scheduler.state_dict() , UpperCamelCase ) A__ : Dict =torch.load(UpperCamelCase ) scheduler.load_state_dict(UpperCamelCase ) return lrs @require_torch class __lowerCAmelCase ( unittest.TestCase): '''simple docstring''' def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int ): self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) ) for a, b in zip(UpperCamelCase__ , UpperCamelCase__ ): self.assertAlmostEqual(UpperCamelCase__ , UpperCamelCase__ , delta=UpperCamelCase__ ) def _UpperCAmelCase ( self : Tuple ): A__ : Any =torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase__ ) A__ : Optional[Any] =torch.tensor([0.4, 0.2, -0.5] ) A__ : Any =nn.MSELoss() # No warmup, constant schedule, no gradient clipping A__ : List[str] =AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 ) for _ in range(100 ): A__ : 
Optional[int] =criterion(UpperCamelCase__ , UpperCamelCase__ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) def _UpperCAmelCase ( self : Dict ): A__ : Optional[int] =torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase__ ) A__ : Dict =torch.tensor([0.4, 0.2, -0.5] ) A__ : Optional[int] =nn.MSELoss() # No warmup, constant schedule, no gradient clipping A__ : int =Adafactor( params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=UpperCamelCase__ , weight_decay=0.0 , relative_step=UpperCamelCase__ , scale_parameter=UpperCamelCase__ , warmup_init=UpperCamelCase__ , ) for _ in range(1000 ): A__ : List[Any] =criterion(UpperCamelCase__ , UpperCamelCase__ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) @require_torch class __lowerCAmelCase ( unittest.TestCase): '''simple docstring''' __magic_name__ : Optional[int] = nn.Linear(50 , 50) if is_torch_available() else None __magic_name__ : Any = AdamW(m.parameters() , lr=10.0) if is_torch_available() else None __magic_name__ : Union[str, Any] = 10 def _UpperCAmelCase ( self : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int=None ): self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) ) for a, b in zip(UpperCamelCase__ , UpperCamelCase__ ): self.assertAlmostEqual(UpperCamelCase__ , UpperCamelCase__ , delta=UpperCamelCase__ , msg=UpperCamelCase__ ) def _UpperCAmelCase ( self : Optional[Any] ): A__ : Union[str, Any] ={"num_warmup_steps": 2, "num_training_steps": 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) A__ : Union[str, Any] ={ 
get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {"num_warmup_steps": 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, "num_cycles": 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, "power": 2.0, "lr_end": 1E-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {"num_warmup_steps": 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): A__ , A__ : Any =data A__ : Union[str, Any] =scheduler_func(self.optimizer , **UpperCamelCase__ ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) A__ : int =unwrap_schedule(UpperCamelCase__ , self.num_steps ) self.assertListAlmostEqual( UpperCamelCase__ , UpperCamelCase__ , tol=1E-2 , msg=F'''failed for {scheduler_func} in normal scheduler''' , ) A__ : List[str] =scheduler_func(self.optimizer , **UpperCamelCase__ ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(UpperCamelCase__ ) # wrap to test picklability of the schedule A__ : Tuple =unwrap_and_save_reload_schedule(UpperCamelCase__ , self.num_steps ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ , msg=F'''failed for {scheduler_func} in save and reload''' ) class __lowerCAmelCase : '''simple docstring''' def __init__( self : int , UpperCamelCase__ : str ): A__ : int =fn def __call__( self : List[Any] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : List[Any] ): return self.fn(*UpperCamelCase__ , **UpperCamelCase__ ) @classmethod def _UpperCAmelCase ( self : Dict , 
UpperCamelCase__ : Dict ): A__ : str =list(map(self , scheduler.lr_lambdas ) )
656
0
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING A_ = logging.get_logger(__name__) class __lowerCAmelCase ( _UpperCamelCase ): '''simple docstring''' __lowerCamelCase : List[Any] = """upernet""" def __init__( self: List[str] , UpperCamelCase_: List[Any]=None , UpperCamelCase_: List[Any]=512 , UpperCamelCase_: Tuple=0.02 , UpperCamelCase_: Union[str, Any]=[1, 2, 3, 6] , UpperCamelCase_: int=True , UpperCamelCase_: str=0.4 , UpperCamelCase_: List[str]=384 , UpperCamelCase_: str=256 , UpperCamelCase_: Union[str, Any]=1 , UpperCamelCase_: str=False , UpperCamelCase_: Dict=255 , **UpperCamelCase_: int , ): super().__init__(**UpperCamelCase__ ) if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." ) UpperCamelCase_ =CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] ) elif isinstance(UpperCamelCase__ , UpperCamelCase__ ): UpperCamelCase_ =backbone_config.get("model_type" ) UpperCamelCase_ =CONFIG_MAPPING[backbone_model_type] UpperCamelCase_ =config_class.from_dict(UpperCamelCase__ ) UpperCamelCase_ =backbone_config UpperCamelCase_ =hidden_size UpperCamelCase_ =initializer_range UpperCamelCase_ =pool_scales UpperCamelCase_ =use_auxiliary_head UpperCamelCase_ =auxiliary_loss_weight UpperCamelCase_ =auxiliary_in_channels UpperCamelCase_ =auxiliary_channels UpperCamelCase_ =auxiliary_num_convs UpperCamelCase_ =auxiliary_concat_input UpperCamelCase_ =loss_ignore_index def UpperCamelCase__ ( self: Optional[int] ): UpperCamelCase_ =copy.deepcopy(self.__dict__ ) UpperCamelCase_ =self.backbone_config.to_dict() UpperCamelCase_ =self.__class__.model_type return output
391
"""simple docstring""" import argparse import torch from transformers import ( SpeechTaConfig, SpeechTaFeatureExtractor, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaProcessor, SpeechTaTokenizer, logging, ) from transformers.tokenization_utils import AddedToken logging.set_verbosity_info() __A : List[Any] = logging.get_logger("transformers.models.speecht5") __A : Optional[Any] = { "speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm", "speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection", "speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv", "speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed", } __A : Optional[int] = { "text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens", "text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha", } __A : List[str] = { "speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0", "speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1", "speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer", "speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha", "speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer", } __A : List[Any] = { "speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out", "speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out", "speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv", "speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm", "speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv", "speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm", 
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv", "speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm", "speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv", "speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm", "speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv", "speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm", } __A : Union[str, Any] = { "text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens", } __A : Any = { "text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head", } __A : Union[str, Any] = { "encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj", "encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj", "encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj", "encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj", "encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm", "encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense", "encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense", "encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm", "encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm", "encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k", } __A : Optional[int] = { "decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj", "decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj", "decoder.layers.*.self_attn.q_proj": 
"speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj", "decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj", "decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm", "decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj", "decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj", "decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj", "decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj", "decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm", "decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense", "decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense", "decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm", } __A : Union[str, Any] = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_TEXT_DECODER_PRENET, **MAPPING_TEXT_DECODER_POSTNET, } __A : Optional[Any] = { **MAPPING_TEXT_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } __A : Optional[int] = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } __A : int = [] __A : int = [ "encoder.version", "encoder.layers.*.norm_k.weight", "encoder.layers.*.norm_k.bias", "decoder.version", "decoder.layers.*.norm_k.weight", "decoder.layers.*.norm_k.bias", "decoder.pos_emb.pe_k", "speech_encoder_prenet.embed_positions._float_tensor", "text_decoder_prenet.embed_positions._float_tensor", ] __A : Optional[Any] = IGNORE_KEYS + [ "encoder.proj", 
"text_encoder_prenet.*", "speech_decoder_prenet.*", "speech_decoder_postnet.*", ] __A : Tuple = IGNORE_KEYS + [ "encoder.proj", "speech_encoder_prenet.*", "text_decoder_prenet.*", "text_decoder_postnet.*", ] __A : Union[str, Any] = IGNORE_KEYS + [ "encoder.proj", "text_encoder_prenet.*", "text_decoder_prenet.*", "text_decoder_postnet.*", ] def lowercase ( UpperCamelCase : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : List[Any] , UpperCamelCase : int ): """simple docstring""" for attribute in key.split("." ): A__ : Dict =getattr(UpperCamelCase , UpperCamelCase ) if weight_type is not None: A__ : Union[str, Any] =getattr(UpperCamelCase , UpperCamelCase ).shape else: A__ : Tuple =hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": A__ : Any =value elif weight_type == "weight_g": A__ : Any =value elif weight_type == "weight_v": A__ : Any =value elif weight_type == "bias": A__ : Tuple =value elif weight_type == "running_mean": A__ : Dict =value elif weight_type == "running_var": A__ : List[str] =value elif weight_type == "num_batches_tracked": A__ : Dict =value else: A__ : Optional[int] =value logger.info(F'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''' ) def lowercase ( UpperCamelCase : Tuple , UpperCamelCase : Tuple ): """simple docstring""" for key in ignore_keys: if key.endswith(".*" ): if name.startswith(key[:-1] ): return True elif ".*." in key: A__ , A__ : List[str] =key.split(".*." 
) if prefix in name and suffix in name: return True elif key in name: return True return False def lowercase ( UpperCamelCase : Dict , UpperCamelCase : Optional[int] , UpperCamelCase : Dict ): """simple docstring""" A__ : Tuple =[] if task == "s2t": A__ : Dict =hf_model.speechta.encoder.prenet.feature_encoder A__ : int =MAPPING_S2T A__ : List[Any] =IGNORE_KEYS_S2T elif task == "t2s": A__ : Union[str, Any] =None A__ : List[Any] =MAPPING_T2S A__ : Tuple =IGNORE_KEYS_T2S elif task == "s2s": A__ : Optional[Any] =hf_model.speechta.encoder.prenet.feature_encoder A__ : Tuple =MAPPING_S2S A__ : Any =IGNORE_KEYS_S2S else: raise ValueError(F'''Unsupported task: {task}''' ) for name, value in fairseq_dict.items(): if should_ignore(UpperCamelCase , UpperCamelCase ): logger.info(F'''{name} was ignored''' ) continue A__ : Optional[Any] =False if "conv_layers" in name: load_conv_layer( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , hf_model.config.feat_extract_norm == "group" , ) A__ : List[Any] =True else: for key, mapped_key in MAPPING.items(): # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if "*" in key: A__ , A__ : Dict =key.split(".*." ) if prefix in name and suffix in name: A__ : int =suffix # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: if key in name: A__ : List[Any] =True if "*" in mapped_key: A__ : Optional[int] =name.split(UpperCamelCase )[0].split("." 
)[-2] A__ : int =mapped_key.replace("*" , UpperCamelCase ) if "weight_g" in name: A__ : str ="weight_g" elif "weight_v" in name: A__ : Optional[Any] ="weight_v" elif "bias" in name: A__ : Any ="bias" elif "weight" in name: A__ : Optional[int] ="weight" elif "running_mean" in name: A__ : Tuple ="running_mean" elif "running_var" in name: A__ : Optional[int] ="running_var" elif "num_batches_tracked" in name: A__ : str ="num_batches_tracked" else: A__ : List[Any] =None set_recursively(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) continue if not is_used: unused_weights.append(UpperCamelCase ) logger.warning(F'''Unused weights: {unused_weights}''' ) def lowercase ( UpperCamelCase : Dict , UpperCamelCase : Dict , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : Dict ): """simple docstring""" A__ : Any =full_name.split("conv_layers." )[-1] A__ : Dict =name.split("." ) A__ : int =int(items[0] ) A__ : str =int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) A__ : Optional[Any] =value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) A__ : Optional[int] =value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, 
but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) A__ : Any =value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) A__ : Any =value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(UpperCamelCase ) @torch.no_grad() def lowercase ( UpperCamelCase : Any , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[str] , UpperCamelCase : str=None , UpperCamelCase : Any=None , UpperCamelCase : Tuple=None , ): """simple docstring""" if config_path is not None: A__ : Any =SpeechTaConfig.from_pretrained(UpperCamelCase ) else: A__ : Any =SpeechTaConfig() if task == "s2t": A__ : Union[str, Any] =config.max_text_positions A__ : Dict =SpeechTaForSpeechToText(UpperCamelCase ) elif task == "t2s": A__ : str =1876 A__ : Optional[int] =600 A__ : Tuple =config.max_speech_positions A__ : Optional[Any] =SpeechTaForTextToSpeech(UpperCamelCase ) elif task == "s2s": A__ : str =1876 A__ : Tuple =config.max_speech_positions A__ : Any =SpeechTaForSpeechToSpeech(UpperCamelCase ) else: raise ValueError(F'''Unknown task name: {task}''' ) if vocab_path: A__ : str =SpeechTaTokenizer(UpperCamelCase , model_max_length=config.max_text_positions ) # Mask token behaves like a normal word, i.e. 
include the space before it A__ : Optional[Any] =AddedToken("<mask>" , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) A__ : int =mask_token tokenizer.add_special_tokens({"mask_token": mask_token} ) tokenizer.add_tokens(["<ctc_blank>"] ) A__ : Dict =SpeechTaFeatureExtractor() A__ : Tuple =SpeechTaProcessor(tokenizer=UpperCamelCase , feature_extractor=UpperCamelCase ) processor.save_pretrained(UpperCamelCase ) A__ : Union[str, Any] =torch.load(UpperCamelCase ) recursively_load_weights(fairseq_checkpoint["model"] , UpperCamelCase , UpperCamelCase ) model.save_pretrained(UpperCamelCase ) if repo_id: print("Pushing to the hub..." ) processor.push_to_hub(UpperCamelCase ) model.push_to_hub(UpperCamelCase ) if __name__ == "__main__": __A : Dict = argparse.ArgumentParser() parser.add_argument( "--task", default="s2t", type=str, help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.", ) parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model." ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) __A : str = parser.parse_args() convert_speechta_checkpoint( args.task, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.vocab_path, args.push_to_hub, )
656
0
"""Check that a TensorFlow SavedModel only uses ops supported by a given ONNX opset."""
import argparse
import json
import os

# NOTE(review): the generated protobuf module is `saved_model_pb2`; the original
# `saved_model_pba` does not exist in TensorFlow and fails at import time.
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel

# All paths are set with the intent you should run this script from the root of the repo
# with the command: python utils/check_copies.py
REPO_PATH = "."

# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
    "Assert",
    "AssignVariableOp",
    "EmptyTensorList",
    "MergeV2Checkpoints",
    "ReadVariableOp",
    "ResourceGather",
    "RestoreV2",
    "SaveV2",
    "ShardedFilename",
    "StatefulPartitionedCall",
    "StaticRegexFullMatch",
    "VarHandleOp",
]


def onnx_compliancy(saved_model_path, strict, opset):
    """Verify that every op used by the SavedModel graph is either exportable to
    ONNX at ``opset`` or a known-internal TensorFlow op.

    Args:
        saved_model_path: path to the SavedModel ``.pb`` file to inspect.
        strict: if True, raise instead of printing when incompatible ops exist.
        opset: ONNX opset version to validate against.

    Raises:
        Exception: in strict mode, when incompatible ops are present.
    """
    # Renamed from the mangled `a_`: the __main__ block below calls
    # `onnx_compliancy(...)`, which previously raised NameError.
    saved_model = SavedModel()
    onnx_ops = []

    # The per-opset lists of exportable ops live in utils/tf_ops/onnx.json,
    # relative to the repository root.
    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    # Opsets are cumulative: collect every op available up to `opset`.
    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = [op for op in model_op_names if op not in onnx_ops and op not in INTERNAL_OPS]

    if strict and len(incompatible_ops) > 0:
        # Fixed: the original concatenated a str with a list (TypeError); join the
        # op names into the message instead.
        raise Exception(
            f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops)
        )
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
    parser.add_argument(
        "--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
    )
    parser.add_argument(
        "--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
    )
    parser.add_argument(
        "--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
    )
    args = parser.parse_args()

    if args.framework == "onnx":
        onnx_compliancy(args.saved_model_path, args.strict, args.opset)
452
"""simple docstring""" from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase): '''simple docstring''' __magic_name__ : List[Any] = [R"""h\.\d+\.attn\.bias""", R"""h\.\d+\.attn\.masked_bias"""] @register_to_config def __init__( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : int = 50257 , UpperCamelCase__ : int = 1024 , UpperCamelCase__ : int = 768 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : str = "gelu_new" , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : float = 1E-5 , UpperCamelCase__ : float = 0.02 , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , ): super().__init__() A__ : Dict =prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( F'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and''' F''' `n_embd`: {n_embd} are not equal.''' ) A__ : Optional[int] =prefix_inner_dim A__ : Optional[int] =prefix_hidden_dim A__ : Optional[int] =( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim ) if self.prefix_hidden_dim is not None else nn.Identity() ) A__ : Optional[int] =( nn.Linear(self.prefix_hidden_dim , UpperCamelCase__ ) if self.prefix_hidden_dim is not None else nn.Identity() ) A__ : str =GPTaConfig( vocab_size=UpperCamelCase__ , n_positions=UpperCamelCase__ , n_embd=UpperCamelCase__ , n_layer=UpperCamelCase__ , n_head=UpperCamelCase__ , n_inner=UpperCamelCase__ 
, activation_function=UpperCamelCase__ , resid_pdrop=UpperCamelCase__ , embd_pdrop=UpperCamelCase__ , attn_pdrop=UpperCamelCase__ , layer_norm_epsilon=UpperCamelCase__ , initializer_range=UpperCamelCase__ , scale_attn_weights=UpperCamelCase__ , use_cache=UpperCamelCase__ , scale_attn_by_inverse_layer_idx=UpperCamelCase__ , reorder_and_upcast_attn=UpperCamelCase__ , ) A__ : Any =GPTaLMHeadModel(UpperCamelCase__ ) def _UpperCAmelCase ( self : Any , UpperCamelCase__ : torch.Tensor , UpperCamelCase__ : torch.Tensor , UpperCamelCase__ : Optional[torch.Tensor] = None , UpperCamelCase__ : Optional[torch.Tensor] = None , ): A__ : int =self.transformer.transformer.wte(UpperCamelCase__ ) A__ : Tuple =self.encode_prefix(UpperCamelCase__ ) A__ : Union[str, Any] =self.decode_prefix(UpperCamelCase__ ) A__ : Tuple =torch.cat((prefix_embeds, embedding_text) , dim=1 ) if labels is not None: A__ : Any =self.get_dummy_token(input_ids.shape[0] , input_ids.device ) A__ : List[Any] =torch.cat((dummy_token, input_ids) , dim=1 ) A__ : Any =self.transformer(inputs_embeds=UpperCamelCase__ , labels=UpperCamelCase__ , attention_mask=UpperCamelCase__ ) if self.prefix_hidden_dim is not None: return out, hidden else: return out def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : torch.device ): return torch.zeros(UpperCamelCase__ , self.prefix_length , dtype=torch.intaa , device=UpperCamelCase__ ) def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : Tuple ): return self.encode_prefix(UpperCamelCase__ ) @torch.no_grad() def _UpperCAmelCase ( self : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : str ): A__ : Optional[int] =torch.split(UpperCamelCase__ , 1 , dim=0 ) A__ : List[str] =[] A__ : Dict =[] for feature in features: A__ : Any =self.decode_prefix(feature.to(UpperCamelCase__ ) ) # back to the clip feature # Only support beam search for now A__ , A__ : Optional[Any] =self.generate_beam( 
input_embeds=UpperCamelCase__ , device=UpperCamelCase__ , eos_token_id=UpperCamelCase__ ) generated_tokens.append(output_tokens[0] ) generated_seq_lengths.append(seq_lengths[0] ) A__ : Optional[Any] =torch.stack(UpperCamelCase__ ) A__ : Optional[int] =torch.stack(UpperCamelCase__ ) return generated_tokens, generated_seq_lengths @torch.no_grad() def _UpperCAmelCase ( self : List[Any] , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : int = 5 , UpperCamelCase__ : int = 67 , UpperCamelCase__ : float = 1.0 , UpperCamelCase__ : Optional[int] = None , ): A__ : str =eos_token_id A__ : Optional[Any] =None A__ : int =None A__ : Union[str, Any] =torch.ones(UpperCamelCase__ , device=UpperCamelCase__ , dtype=torch.int ) A__ : Any =torch.zeros(UpperCamelCase__ , device=UpperCamelCase__ , dtype=torch.bool ) if input_embeds is not None: A__ : Union[str, Any] =input_embeds else: A__ : Optional[Any] =self.transformer.transformer.wte(UpperCamelCase__ ) for i in range(UpperCamelCase__ ): A__ : Optional[int] =self.transformer(inputs_embeds=UpperCamelCase__ ) A__ : Tuple =outputs.logits A__ : Union[str, Any] =logits[:, -1, :] / (temperature if temperature > 0 else 1.0) A__ : Optional[Any] =logits.softmax(-1 ).log() if scores is None: A__ , A__ : Union[str, Any] =logits.topk(UpperCamelCase__ , -1 ) A__ : Union[str, Any] =generated.expand(UpperCamelCase__ , *generated.shape[1:] ) A__ , A__ : Optional[int] =next_tokens.permute(1 , 0 ), scores.squeeze(0 ) if tokens is None: A__ : str =next_tokens else: A__ : Optional[Any] =tokens.expand(UpperCamelCase__ , *tokens.shape[1:] ) A__ : str =torch.cat((tokens, next_tokens) , dim=1 ) else: A__ : Union[str, Any] =-float(np.inf ) A__ : Dict =0 A__ : Optional[Any] =scores[:, None] + logits seq_lengths[~is_stopped] += 1 A__ : Optional[Any] =scores_sum / seq_lengths[:, None] A__ , A__ : List[Any] =scores_sum_average.view(-1 ).topk(UpperCamelCase__ , -1 ) A__ : Tuple 
=next_tokens // scores_sum.shape[1] A__ : List[Any] =seq_lengths[next_tokens_source] A__ : int =next_tokens % scores_sum.shape[1] A__ : str =next_tokens.unsqueeze(1 ) A__ : List[Any] =tokens[next_tokens_source] A__ : int =torch.cat((tokens, next_tokens) , dim=1 ) A__ : List[str] =generated[next_tokens_source] A__ : Optional[Any] =scores_sum_average * seq_lengths A__ : Optional[int] =is_stopped[next_tokens_source] A__ : List[str] =self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 ) A__ : str =torch.cat((generated, next_token_embed) , dim=1 ) A__ : str =is_stopped + next_tokens.eq(UpperCamelCase__ ).squeeze() if is_stopped.all(): break A__ : Optional[int] =scores / seq_lengths A__ : List[Any] =scores.argsort(descending=UpperCamelCase__ ) # tokens tensors are already padded to max_seq_length A__ : int =[tokens[i] for i in order] A__ : Any =torch.stack(UpperCamelCase__ , dim=0 ) A__ : int =torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype ) return output_texts, seq_lengths
656
0
"""simple docstring""" import os import sys import tempfile import torch from .state import AcceleratorState from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment def a_ ( lowercase__ :Optional[int], lowercase__ :List[Any]=(), lowercase__ :Optional[Any]=None, lowercase__ :Dict="no", lowercase__ :Union[str, Any]="29500" ): __lowerCamelCase = False __lowerCamelCase = False if any(key.startswith("""KAGGLE""" ) for key in os.environ.keys() ): __lowerCamelCase = True elif "IPython" in sys.modules: __lowerCamelCase = "google.colab" in str(sys.modules["""IPython"""].get_ipython() ) try: __lowerCamelCase = PrecisionType(mixed_precision.lower() ) except ValueError: raise ValueError( f'Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.' ) if (in_colab or in_kaggle) and (os.environ.get("""TPU_NAME""", lowercase__ ) is not None): # TPU launch import torch_xla.distributed.xla_multiprocessing as xmp if len(AcceleratorState._shared_state ) > 0: raise ValueError( """To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside """ """your training function. Restart your notebook and make sure no cells initializes an """ """`Accelerator`.""" ) if num_processes is None: __lowerCamelCase = 8 __lowerCamelCase = PrepareForLaunch(lowercase__, distributed_type="""TPU""" ) print(f'Launching a training on {num_processes} TPU cores.' ) xmp.spawn(lowercase__, args=lowercase__, nprocs=lowercase__, start_method="""fork""" ) elif in_colab: # No need for a distributed launch otherwise as it's either CPU or one GPU. 
if torch.cuda.is_available(): print("""Launching training on one GPU.""" ) else: print("""Launching training on one CPU.""" ) function(*lowercase__ ) else: if num_processes is None: raise ValueError( """You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.""" ) if num_processes > 1: # Multi-GPU launch from torch.multiprocessing import start_processes from torch.multiprocessing.spawn import ProcessRaisedException if len(AcceleratorState._shared_state ) > 0: raise ValueError( """To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized """ """inside your training function. Restart your notebook and make sure no cells initializes an """ """`Accelerator`.""" ) if torch.cuda.is_initialized(): raise ValueError( """To launch a multi-GPU training from your notebook, you need to avoid running any instruction """ """using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA """ """function.""" ) # torch.distributed will expect a few environment variable to be here. We set the ones common to each # process here (the other ones will be set be the launcher). with patch_environment( world_size=lowercase__, master_addr="""127.0.01""", master_port=lowercase__, mixed_precision=lowercase__ ): __lowerCamelCase = PrepareForLaunch(lowercase__, distributed_type="""MULTI_GPU""" ) print(f'Launching training on {num_processes} GPUs.' ) try: start_processes(lowercase__, args=lowercase__, nprocs=lowercase__, start_method="""fork""" ) except ProcessRaisedException as e: if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]: raise RuntimeError( """CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. """ """This likely stems from an outside import causing issues once the `notebook_launcher()` is called. 
""" """Please review your imports and test them when running the `notebook_launcher()` to identify """ """which one is problematic.""" ) from e else: # No need for a distributed launch otherwise as it's either CPU, GPU or MPS. if is_mps_available(): __lowerCamelCase = "1" print("""Launching training on MPS.""" ) elif torch.cuda.is_available(): print("""Launching training on one GPU.""" ) else: print("""Launching training on CPU.""" ) function(*lowercase__ ) def a_ ( lowercase__ :Any, lowercase__ :Any=(), lowercase__ :List[str]=2 ): from torch.multiprocessing import start_processes with tempfile.NamedTemporaryFile() as tmp_file: # torch.distributed will expect a few environment variable to be here. We set the ones common to each # process here (the other ones will be set be the launcher). with patch_environment( world_size=lowercase__, master_addr="""127.0.01""", master_port="""29500""", accelerate_mixed_precision="""no""", accelerate_debug_rdv_file=tmp_file.name, accelerate_use_cpu="""yes""", ): __lowerCamelCase = PrepareForLaunch(lowercase__, debug=lowercase__ ) start_processes(lowercase__, args=lowercase__, nprocs=lowercase__, start_method="""fork""" )
281
"""simple docstring""" import os def lowercase ( ): """simple docstring""" A__ : List[Any] =os.path.dirname(os.path.realpath(UpperCamelCase ) ) A__ : str =os.path.join(UpperCamelCase , "triangle.txt" ) with open(UpperCamelCase ) as f: A__ : Optional[int] =f.readlines() A__ : str =[] for line in triangle: A__ : Union[str, Any] =[] for number in line.strip().split(" " ): numbers_from_line.append(int(UpperCamelCase ) ) a.append(UpperCamelCase ) for i in range(1 , len(UpperCamelCase ) ): for j in range(len(a[i] ) ): A__ : Union[str, Any] =a[i - 1][j] if j != len(a[i - 1] ) else 0 A__ : Union[str, Any] =a[i - 1][j - 1] if j > 0 else 0 a[i][j] += max(UpperCamelCase , UpperCamelCase ) return max(a[-1] ) if __name__ == "__main__": print(solution())
656
0
"""Create a "student" seq2seq model by copying selected layers of a teacher.

Used by the seq2seq distillation scripts: the student shares the teacher's
configuration except for a reduced number of encoder/decoder layers, and its
layers are initialised from a hard-coded selection of teacher layers.
"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union

import fire
from torch import nn

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging

logger = logging.get_logger(__name__)


def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    """Copy ``src_layers[i]`` for each i in *layers_to_copy* into *dest_layers* (in order)."""
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())


LAYERS_TO_COPY = {
    # maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
    # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
    12: {
        1: [0],  # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
        2: [0, 6],
        3: [0, 6, 11],
        4: [0, 4, 8, 11],
        6: [0, 2, 4, 7, 9, 11],
        9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
        12: list(range(12)),
    },
    16: {  # maps num layers in student -> which teacher layers to copy
        1: [0],
        2: [0, 15],
        3: [0, 8, 15],
        4: [0, 5, 10, 15],
        6: [0, 3, 6, 9, 12, 15],
        8: [0, 2, 4, 6, 8, 10, 12, 15],
        9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
        12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
        16: list(range(16)),
    },
    6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
    # maps num layers in student -> which teacher layers to copy.
    6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
    12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
    16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}


def pick_layers_to_copy(n_student: int, n_teacher: int) -> List[int]:
    """Return the teacher layer indices to copy into an *n_student*-layer student.

    Falls back (with a warning) to the first ``n_student`` teacher layers when
    no hard-coded mapping exists for this (teacher, student) pair.
    """
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))


def get_layers_to_supervise(n_student: int, n_teacher: int) -> List[int]:
    """Return which teacher layers supervise each student layer during distillation.

    Raises:
        ValueError: if the student has more layers than the teacher.
    """
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]


def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
    """Build a student with ``e`` encoder and ``d`` decoder layers copied from *teacher*.

    Args:
        teacher: model instance or model identifier to load.
        save_path: where the student (and tokenizer, if loading by name) is saved.
        e / d: number of student encoder / decoder layers; ``None`` keeps the
            teacher's count for that side.  At least one must be given.
        copy_first_teacher_layers: copy the first N layers instead of the
            alternating selection from ``LAYERS_TO_COPY``.
        e_layers_to_copy / d_layers_to_copy: explicit layer selections,
            overriding ``pick_layers_to_copy``.
        **extra_config_kwargs: extra overrides for the student config.

    Returns:
        ``(student, e_layers_to_copy, d_layers_to_copy)``.
    """
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict: this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher key.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(teacher, "prophetnet"):
            # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    # NOTE(review): target of this dict assignment was destroyed by obfuscation;
    # restored as the config metadata record per the upstream script -- confirm.
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    # Save information about copying for easier reproducibility
    return student, e_layers_to_copy, d_layers_to_copy


if __name__ == "__main__":
    fire.Fire(create_student_by_copying_alternating_layers)
206
"""simple docstring""" import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() __A : int = logging.get_logger(__name__) def lowercase ( UpperCamelCase : Any ): """simple docstring""" A__ : str =OrderedDict() for key, value in state_dict.items(): if key.startswith("module.encoder" ): A__ : Dict =key.replace("module.encoder" , "glpn.encoder" ) if key.startswith("module.decoder" ): A__ : Optional[int] =key.replace("module.decoder" , "decoder.stages" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 A__ : Tuple =key[key.find("patch_embed" ) + len("patch_embed" )] A__ : Optional[Any] =key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(UpperCamelCase )-1}''' ) if "norm" in key: A__ : Dict =key.replace("norm" , "layer_norm" ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 A__ : Any =key[key.find("glpn.encoder.layer_norm" ) + len("glpn.encoder.layer_norm" )] A__ : Tuple =key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(UpperCamelCase )-1}''' ) if "layer_norm1" in key: A__ : List[Any] =key.replace("layer_norm1" , "layer_norm_1" ) if "layer_norm2" in key: A__ : Optional[int] =key.replace("layer_norm2" , "layer_norm_2" ) if "block" in key: # replace for example block1 by block.0 A__ : int =key[key.find("block" ) + len("block" )] A__ : Optional[Any] =key.replace(F'''block{idx}''' , F'''block.{int(UpperCamelCase )-1}''' ) if "attn.q" in key: A__ : Optional[Any] =key.replace("attn.q" , "attention.self.query" ) if "attn.proj" in key: A__ : Union[str, Any] =key.replace("attn.proj" , "attention.output.dense" ) if "attn" in key: A__ : str =key.replace("attn" , "attention.self" ) if "fc1" in key: A__ : Dict =key.replace("fc1" , "dense1" ) if "fc2" in key: A__ : str 
=key.replace("fc2" , "dense2" ) if "linear_pred" in key: A__ : List[Any] =key.replace("linear_pred" , "classifier" ) if "linear_fuse" in key: A__ : List[str] =key.replace("linear_fuse.conv" , "linear_fuse" ) A__ : Any =key.replace("linear_fuse.bn" , "batch_norm" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 A__ : str =key[key.find("linear_c" ) + len("linear_c" )] A__ : Dict =key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(UpperCamelCase )-1}''' ) if "bot_conv" in key: A__ : Union[str, Any] =key.replace("bot_conv" , "0.convolution" ) if "skip_conv1" in key: A__ : List[Any] =key.replace("skip_conv1" , "1.convolution" ) if "skip_conv2" in key: A__ : int =key.replace("skip_conv2" , "2.convolution" ) if "fusion1" in key: A__ : Optional[Any] =key.replace("fusion1" , "1.fusion" ) if "fusion2" in key: A__ : Optional[Any] =key.replace("fusion2" , "2.fusion" ) if "fusion3" in key: A__ : int =key.replace("fusion3" , "3.fusion" ) if "fusion" in key and "conv" in key: A__ : List[str] =key.replace("conv" , "convolutional_layer" ) if key.startswith("module.last_layer_depth" ): A__ : Tuple =key.replace("module.last_layer_depth" , "head.head" ) A__ : int =value return new_state_dict def lowercase ( UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict ): """simple docstring""" # for each of the encoder blocks: for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) A__ : int =state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' ) A__ : str =state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' ) # next, add keys and values (in that order) to the state dict A__ : List[str] =kv_weight[ : config.hidden_sizes[i], : ] A__ : Dict =kv_bias[: config.hidden_sizes[i]] A__ : Any =kv_weight[ config.hidden_sizes[i] :, : ] A__ : Any =kv_bias[config.hidden_sizes[i] :] def lowercase ( ): """simple 
docstring""" A__ : Optional[Any] ="http://images.cocodataset.org/val2017/000000039769.jpg" A__ : List[Any] =Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw ) return image @torch.no_grad() def lowercase ( UpperCamelCase : str , UpperCamelCase : Tuple , UpperCamelCase : List[str]=False , UpperCamelCase : str=None ): """simple docstring""" A__ : List[str] =GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] ) # load image processor (only resize + rescale) A__ : str =GLPNImageProcessor() # prepare image A__ : Any =prepare_img() A__ : Optional[int] =image_processor(images=UpperCamelCase , return_tensors="pt" ).pixel_values logger.info("Converting model..." ) # load original state dict A__ : int =torch.load(UpperCamelCase , map_location=torch.device("cpu" ) ) # rename keys A__ : Union[str, Any] =rename_keys(UpperCamelCase ) # key and value matrices need special treatment read_in_k_v(UpperCamelCase , UpperCamelCase ) # create HuggingFace model and load state dict A__ : Optional[int] =GLPNForDepthEstimation(UpperCamelCase ) model.load_state_dict(UpperCamelCase ) model.eval() # forward pass A__ : int =model(UpperCamelCase ) A__ : Optional[Any] =outputs.predicted_depth # verify output if model_name is not None: if "nyu" in model_name: A__ : List[Any] =torch.tensor( [[4.41_47, 4.08_73, 4.06_73], [3.78_90, 3.28_81, 3.15_25], [3.76_74, 3.54_23, 3.49_13]] ) elif "kitti" in model_name: A__ : Tuple =torch.tensor( [[3.42_91, 2.78_65, 2.51_51], [3.28_41, 2.70_21, 2.35_02], [3.11_47, 2.46_25, 2.24_81]] ) else: raise ValueError(F'''Unknown model name: {model_name}''' ) A__ : str =torch.Size([1, 480, 640] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3] , UpperCamelCase , atol=1E-4 ) print("Looks ok!" ) # finally, push to hub if required if push_to_hub: logger.info("Pushing model and image processor to the hub..." 
) model.push_to_hub( repo_path_or_name=Path(UpperCamelCase , UpperCamelCase ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=UpperCamelCase , ) image_processor.push_to_hub( repo_path_or_name=Path(UpperCamelCase , UpperCamelCase ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=UpperCamelCase , ) if __name__ == "__main__": __A : List[str] = argparse.ArgumentParser() parser.add_argument( "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub." ) parser.add_argument( "--model_name", default="glpn-kitti", type=str, help="Name of the model in case you're pushing to the hub.", ) __A : Any = parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
656
0
"""simple docstring""" _SCREAMING_SNAKE_CASE = range(2, 20 + 1) _SCREAMING_SNAKE_CASE = [10**k for k in range(ks[-1] + 1)] _SCREAMING_SNAKE_CASE = {} def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" __snake_case = sum(a_i[j] for j in range(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) ) ) __snake_case = sum(a_i[j] * base[j] for j in range(min(len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) ) ) __snake_case = 0, 0 __snake_case = n - i __snake_case = memo.get(SCREAMING_SNAKE_CASE ) if sub_memo is not None: __snake_case = sub_memo.get(SCREAMING_SNAKE_CASE ) if jumps is not None and len(SCREAMING_SNAKE_CASE ) > 0: # find and make the largest jump without going over __snake_case = -1 for _k in range(len(SCREAMING_SNAKE_CASE ) - 1 , -1 , -1 ): if jumps[_k][2] <= k and jumps[_k][1] <= max_dn: __snake_case = _k break if max_jump >= 0: __snake_case = jumps[max_jump] # since the difference between jumps is cached, add c __snake_case = diff + c for j in range(min(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) ) ): __snake_case = divmod(SCREAMING_SNAKE_CASE , 10 ) if new_c > 0: add(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else: __snake_case = [] else: __snake_case = {c: []} __snake_case = sub_memo if dn >= max_dn or c + diff >= base[k]: return diff, dn if k > ks[0]: while True: # keep doing smaller jumps __snake_case = next_term(SCREAMING_SNAKE_CASE , k - 1 , i + dn , SCREAMING_SNAKE_CASE ) diff += _diff dn += terms_jumped if dn >= max_dn or c + diff >= base[k]: break else: # would be too small a jump, just compute sequential terms instead __snake_case = compute(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , i + dn , SCREAMING_SNAKE_CASE ) diff += _diff dn += terms_jumped __snake_case = sub_memo[c] # keep jumps sorted by # of terms skipped __snake_case = 0 while j < len(SCREAMING_SNAKE_CASE ): if jumps[j][1] > dn: break j += 1 # cache 
the jump for this value digitsum(b) and c sub_memo[c].insert(SCREAMING_SNAKE_CASE , (diff, dn, k) ) return (diff, dn) def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" if i >= n: return 0, i if k > len(SCREAMING_SNAKE_CASE ): a_i.extend([0 for _ in range(k - len(SCREAMING_SNAKE_CASE ) )] ) # note: a_i -> b * 10^k + c # ds_b -> digitsum(b) # ds_c -> digitsum(c) __snake_case = i __snake_case = 0, 0, 0 for j in range(len(SCREAMING_SNAKE_CASE ) ): if j >= k: ds_b += a_i[j] else: ds_c += a_i[j] while i < n: i += 1 __snake_case = ds_c + ds_b diff += addend __snake_case = 0 for j in range(SCREAMING_SNAKE_CASE ): __snake_case = a_i[j] + addend __snake_case = divmod(SCREAMING_SNAKE_CASE , 10 ) ds_c += a_i[j] if addend > 0: break if addend > 0: add(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return diff, i - start_i def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" for j in range(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) ): __snake_case = digits[j] + addend if s >= 10: __snake_case = divmod(SCREAMING_SNAKE_CASE , 10 ) __snake_case = addend // 10 + quotient else: __snake_case = s __snake_case = addend // 10 if addend == 0: break while addend > 0: __snake_case = divmod(SCREAMING_SNAKE_CASE , 10 ) digits.append(SCREAMING_SNAKE_CASE ) def __UpperCamelCase ( SCREAMING_SNAKE_CASE = 10**15 ) -> List[str]: """simple docstring""" __snake_case = [1] __snake_case = 1 __snake_case = 0 while True: __snake_case = next_term(SCREAMING_SNAKE_CASE , 20 , i + dn , SCREAMING_SNAKE_CASE ) dn += terms_jumped if dn == n - i: break __snake_case = 0 for j in range(len(SCREAMING_SNAKE_CASE ) ): a_n += digits[j] * 10**j return a_n if __name__ == "__main__": print(F"""{solution() = }""")
163
"""simple docstring""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging __A : Any = logging.get_logger(__name__) __A : Optional[Any] = { "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json", # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class __lowerCAmelCase ( _UpperCamelCase): '''simple docstring''' __magic_name__ : Union[str, Any] = """gpt_neo""" __magic_name__ : Union[str, Any] = ["""past_key_values"""] __magic_name__ : Dict = {"""num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""} def __init__( self : Dict , UpperCamelCase__ : List[Any]=50257 , UpperCamelCase__ : Optional[Any]=2048 , UpperCamelCase__ : Tuple=2048 , UpperCamelCase__ : int=24 , UpperCamelCase__ : Dict=[[["global", "local"], 12]] , UpperCamelCase__ : Optional[Any]=16 , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : str=256 , UpperCamelCase__ : List[str]="gelu_new" , UpperCamelCase__ : List[str]=0.0 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : str=0.0 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : List[str]=1E-5 , UpperCamelCase__ : Any=0.02 , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Optional[Any]=50256 , UpperCamelCase__ : List[str]=50256 , **UpperCamelCase__ : str , ): A__ : Optional[Any] =vocab_size A__ : Dict =max_position_embeddings A__ : List[str] =hidden_size A__ : List[Any] =num_layers A__ : Tuple =num_heads A__ : List[str] =intermediate_size A__ : Tuple =window_size A__ : Dict =activation_function A__ : str =resid_dropout A__ : Union[str, Any] =embed_dropout A__ : List[str] =attention_dropout A__ : Tuple =classifier_dropout A__ : int =layer_norm_epsilon A__ : int =initializer_range A__ : str =use_cache A__ : Tuple =bos_token_id A__ 
: int =eos_token_id A__ : int =attention_types A__ : Any =self.expand_attention_types_params(UpperCamelCase__ ) if len(self.attention_layers ) != self.num_layers: raise ValueError( "Configuration for convolutional module is incorrect. " "It is required that `len(config.attention_layers)` == `config.num_layers` " F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, ''' F'''`config.num_layers = {self.num_layers}`. ''' "`config.attention_layers` is prepared using `config.attention_types`. " "Please verify the value of `config.attention_types` argument." ) super().__init__(bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ ) @staticmethod def _UpperCAmelCase ( UpperCamelCase__ : List[str] ): A__ : Optional[Any] =[] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def lowercase ( UpperCamelCase : List[str] , UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] ): """simple docstring""" import torch A__ : List[str] =input.size() A__ : Dict =len(UpperCamelCase ) A__ : Optional[int] =shape[dimension] A__ : str =torch.arange(0 , UpperCamelCase , UpperCamelCase ) A__ : Optional[int] =torch.div(sizedim - size , UpperCamelCase , rounding_mode="floor" ) + 1 A__ : str =torch.arange(UpperCamelCase ) + low_indices[:min_length][:, None] A__ : Tuple =[slice(UpperCamelCase )] * rank A__ : int =indices A__ : Optional[int] =input[s] A__ : Union[str, Any] =list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(UpperCamelCase ) def lowercase ( UpperCamelCase : str , UpperCamelCase : Any ): """simple docstring""" import torch A__ : List[str] =torch.arange(1 , UpperCamelCase ) A__ : List[Any] =torch.remainder(UpperCamelCase , UpperCamelCase ) A__ : Optional[int] =remainders == 0 A__ : str =candidates[divisor_indices] A__ : int =torch.max(UpperCamelCase ) return largest_divisor, torch.div(UpperCamelCase , 
UpperCamelCase , rounding_mode="floor" ) class __lowerCAmelCase ( _UpperCamelCase): '''simple docstring''' @property def _UpperCAmelCase ( self : List[Any] ): A__ : Optional[int] =OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} ) if self.use_past: self.fill_with_past_key_values_(UpperCamelCase__ , direction="inputs" ) A__ : Optional[int] ={0: "batch", 1: "past_sequence + sequence"} else: A__ : Tuple ={0: "batch", 1: "sequence"} return common_inputs @property def _UpperCAmelCase ( self : List[str] ): return self._config.num_heads def _UpperCAmelCase ( self : int , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ): A__ : Union[str, Any] =super(UpperCamelCase__ , self ).generate_dummy_inputs( UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ ) # We need to order the input in the way they appears in the forward() A__ : List[Any] =OrderedDict({"input_ids": common_inputs["input_ids"]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch A__ , A__ : Union[str, Any] =common_inputs["input_ids"].shape # Not using the same length for past_key_values A__ : Union[str, Any] =seqlen + 2 A__ : List[Any] =( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) A__ : Optional[Any] =[ (torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) for _ in range(self.num_layers ) ] A__ : Optional[Any] =common_inputs["attention_mask"] if self.use_past: A__ : Any =ordered_inputs["attention_mask"].dtype A__ : Tuple =torch.cat( [ordered_inputs["attention_mask"], torch.ones(UpperCamelCase__ , UpperCamelCase__ , dtype=UpperCamelCase__ )] , dim=1 ) return ordered_inputs @property def _UpperCAmelCase ( self : List[str] ): return 13
656
0
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, Pipeline, ZeroShotClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. SCREAMING_SNAKE_CASE__ : Any ={"LayoutLMv2Config", "LayoutLMv3Config"} @is_pipeline_test class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" __snake_case = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING __snake_case = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: __snake_case = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: __snake_case = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } def a__ ( self , _lowercase , _lowercase , _lowercase ) -> int: _lowerCamelCase : Dict = ZeroShotClassificationPipeline( model=UpperCamelCase__ , tokenizer=UpperCamelCase__ , candidate_labels=['''polics''', '''health'''] ) return classifier, ["Who are you voting for in 2020?", "My stomach hurts."] def a__ ( self , _lowercase , _lowercase ) -> Dict: _lowerCamelCase : List[str] = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics''' ) self.assertEqual(UpperCamelCase__ , {'''sequence''': ANY(UpperCamelCase__ ), '''labels''': [ANY(UpperCamelCase__ )], '''scores''': [ANY(UpperCamelCase__ )]} ) # No kwarg _lowerCamelCase : List[str] = classifier('''Who are you voting for in 2020?''' , ['''politics'''] ) self.assertEqual(UpperCamelCase__ , {'''sequence''': ANY(UpperCamelCase__ ), '''labels''': [ANY(UpperCamelCase__ )], '''scores''': [ANY(UpperCamelCase__ )]} ) _lowerCamelCase : Any = classifier('''Who are you voting for in 2020?''' , 
candidate_labels=['''politics'''] ) self.assertEqual(UpperCamelCase__ , {'''sequence''': ANY(UpperCamelCase__ ), '''labels''': [ANY(UpperCamelCase__ )], '''scores''': [ANY(UpperCamelCase__ )]} ) _lowerCamelCase : Optional[Any] = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics, public health''' ) self.assertEqual( UpperCamelCase__ , {'''sequence''': ANY(UpperCamelCase__ ), '''labels''': [ANY(UpperCamelCase__ ), ANY(UpperCamelCase__ )], '''scores''': [ANY(UpperCamelCase__ ), ANY(UpperCamelCase__ )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 ) _lowerCamelCase : Any = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health'''] ) self.assertEqual( UpperCamelCase__ , {'''sequence''': ANY(UpperCamelCase__ ), '''labels''': [ANY(UpperCamelCase__ ), ANY(UpperCamelCase__ )], '''scores''': [ANY(UpperCamelCase__ ), ANY(UpperCamelCase__ )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 ) _lowerCamelCase : List[Any] = classifier( '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''This text is about {}''' ) self.assertEqual(UpperCamelCase__ , {'''sequence''': ANY(UpperCamelCase__ ), '''labels''': [ANY(UpperCamelCase__ )], '''scores''': [ANY(UpperCamelCase__ )]} ) # https://github.com/huggingface/transformers/issues/13846 _lowerCamelCase : Dict = classifier(['''I am happy'''] , ['''positive''', '''negative'''] ) self.assertEqual( UpperCamelCase__ , [ {'''sequence''': ANY(UpperCamelCase__ ), '''labels''': [ANY(UpperCamelCase__ ), ANY(UpperCamelCase__ )], '''scores''': [ANY(UpperCamelCase__ ), ANY(UpperCamelCase__ )]} for i in range(1 ) ] , ) _lowerCamelCase : Optional[Any] = classifier(['''I am happy''', '''I am sad'''] , ['''positive''', '''negative'''] ) self.assertEqual( UpperCamelCase__ , [ {'''sequence''': ANY(UpperCamelCase__ ), '''labels''': [ANY(UpperCamelCase__ ), ANY(UpperCamelCase__ 
)], '''scores''': [ANY(UpperCamelCase__ ), ANY(UpperCamelCase__ )]} for i in range(2 ) ] , ) with self.assertRaises(UpperCamelCase__ ): classifier('''''' , candidate_labels='''politics''' ) with self.assertRaises(UpperCamelCase__ ): classifier(UpperCamelCase__ , candidate_labels='''politics''' ) with self.assertRaises(UpperCamelCase__ ): classifier('''Who are you voting for in 2020?''' , candidate_labels='''''' ) with self.assertRaises(UpperCamelCase__ ): classifier('''Who are you voting for in 2020?''' , candidate_labels=UpperCamelCase__ ) with self.assertRaises(UpperCamelCase__ ): classifier( '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''Not formatting template''' , ) with self.assertRaises(UpperCamelCase__ ): classifier( '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template=UpperCamelCase__ , ) self.run_entailment_id(UpperCamelCase__ ) def a__ ( self , _lowercase ) -> Union[str, Any]: _lowerCamelCase : int = zero_shot_classifier.model.config _lowerCamelCase : int = config.labelaid _lowerCamelCase : List[Any] = zero_shot_classifier.entailment_id _lowerCamelCase : Dict = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2} self.assertEqual(zero_shot_classifier.entailment_id , -1 ) _lowerCamelCase : Optional[int] = {"entailment": 0, "neutral": 1, "contradiction": 2} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) _lowerCamelCase : int = {"ENTAIL": 0, "NON-ENTAIL": 1} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) _lowerCamelCase : Any = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0} self.assertEqual(zero_shot_classifier.entailment_id , 2 ) _lowerCamelCase : str = original_labelaid self.assertEqual(UpperCamelCase__ , zero_shot_classifier.entailment_id ) @require_torch def a__ ( self ) -> Optional[int]: _lowerCamelCase : List[str] = pipeline( '''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , ) # There 
was a regression in 4.10 for this # Adding a test so we don't make the mistake again. # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499 zero_shot_classifier( '''Who are you voting for in 2020?''' * 100 , candidate_labels=['''politics''', '''public health''', '''science'''] ) @require_torch def a__ ( self ) -> Optional[int]: _lowerCamelCase : Dict = pipeline( '''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , ) _lowerCamelCase : Union[str, Any] = zero_shot_classifier( '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] ) self.assertEqual( nested_simplify(UpperCamelCase__ ) , { '''sequence''': '''Who are you voting for in 2020?''', '''labels''': ['''science''', '''public health''', '''politics'''], '''scores''': [0.333, 0.333, 0.333], } , ) @require_tf def a__ ( self ) -> Tuple: _lowerCamelCase : Any = pipeline( '''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''tf''' , ) _lowerCamelCase : Tuple = zero_shot_classifier( '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] ) self.assertEqual( nested_simplify(UpperCamelCase__ ) , { '''sequence''': '''Who are you voting for in 2020?''', '''labels''': ['''science''', '''public health''', '''politics'''], '''scores''': [0.333, 0.333, 0.333], } , ) @slow @require_torch def a__ ( self ) -> Dict: _lowerCamelCase : str = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''pt''' ) _lowerCamelCase : str = zero_shot_classifier( '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] ) self.assertEqual( nested_simplify(UpperCamelCase__ ) , { '''sequence''': '''Who are you voting for in 2020?''', '''labels''': ['''politics''', '''public health''', '''science'''], 
'''scores''': [0.976, 0.015, 0.009], } , ) _lowerCamelCase : Dict = zero_shot_classifier( '''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks''' ''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder''' ''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based''' ''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two''' ''' machine translation tasks show these models to be superior in quality while being more parallelizable''' ''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014''' ''' English-to-German translation task, improving over the existing best results, including ensembles by''' ''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new''' ''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small''' ''' fraction of the training costs of the best models from the literature. We show that the Transformer''' ''' generalizes well to other tasks by applying it successfully to English constituency parsing both with''' ''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=UpperCamelCase__ , ) self.assertEqual( nested_simplify(UpperCamelCase__ ) , { '''sequence''': ( '''The dominant sequence transduction models are based on complex recurrent or convolutional neural''' ''' networks in an encoder-decoder configuration. The best performing models also connect the''' ''' encoder and decoder through an attention mechanism. We propose a new simple network''' ''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence''' ''' and convolutions entirely. 
Experiments on two machine translation tasks show these models to be''' ''' superior in quality while being more parallelizable and requiring significantly less time to''' ''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,''' ''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014''' ''' English-to-French translation task, our model establishes a new single-model state-of-the-art''' ''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training''' ''' costs of the best models from the literature. We show that the Transformer generalizes well to''' ''' other tasks by applying it successfully to English constituency parsing both with large and''' ''' limited training data.''' ), '''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''], '''scores''': [0.817, 0.713, 0.018, 0.018], } , ) @slow @require_tf def a__ ( self ) -> Dict: _lowerCamelCase : Tuple = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''tf''' ) _lowerCamelCase : Tuple = zero_shot_classifier( '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] ) self.assertEqual( nested_simplify(UpperCamelCase__ ) , { '''sequence''': '''Who are you voting for in 2020?''', '''labels''': ['''politics''', '''public health''', '''science'''], '''scores''': [0.976, 0.015, 0.009], } , ) _lowerCamelCase : Optional[Any] = zero_shot_classifier( '''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks''' ''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder''' ''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based''' ''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. 
Experiments on two''' ''' machine translation tasks show these models to be superior in quality while being more parallelizable''' ''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014''' ''' English-to-German translation task, improving over the existing best results, including ensembles by''' ''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new''' ''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small''' ''' fraction of the training costs of the best models from the literature. We show that the Transformer''' ''' generalizes well to other tasks by applying it successfully to English constituency parsing both with''' ''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=UpperCamelCase__ , ) self.assertEqual( nested_simplify(UpperCamelCase__ ) , { '''sequence''': ( '''The dominant sequence transduction models are based on complex recurrent or convolutional neural''' ''' networks in an encoder-decoder configuration. The best performing models also connect the''' ''' encoder and decoder through an attention mechanism. We propose a new simple network''' ''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence''' ''' and convolutions entirely. Experiments on two machine translation tasks show these models to be''' ''' superior in quality while being more parallelizable and requiring significantly less time to''' ''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,''' ''' improving over the existing best results, including ensembles by over 2 BLEU. 
On the WMT 2014''' ''' English-to-French translation task, our model establishes a new single-model state-of-the-art''' ''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training''' ''' costs of the best models from the literature. We show that the Transformer generalizes well to''' ''' other tasks by applying it successfully to English constituency parsing both with large and''' ''' limited training data.''' ), '''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''], '''scores''': [0.817, 0.713, 0.018, 0.018], } , )
434
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A : Union[str, Any] = logging.get_logger(__name__) __A : Any = { # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class __lowerCAmelCase ( _UpperCamelCase): '''simple docstring''' __magic_name__ : Tuple = """megatron-bert""" def __init__( self : Tuple , UpperCamelCase__ : Dict=29056 , UpperCamelCase__ : int=1024 , UpperCamelCase__ : Optional[int]=24 , UpperCamelCase__ : Dict=16 , UpperCamelCase__ : int=4096 , UpperCamelCase__ : str="gelu" , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : int=512 , UpperCamelCase__ : str=2 , UpperCamelCase__ : Union[str, Any]=0.02 , UpperCamelCase__ : Any=1E-12 , UpperCamelCase__ : List[Any]=0 , UpperCamelCase__ : str="absolute" , UpperCamelCase__ : Dict=True , **UpperCamelCase__ : Tuple , ): super().__init__(pad_token_id=UpperCamelCase__ , **UpperCamelCase__ ) A__ : Optional[int] =vocab_size A__ : Optional[int] =hidden_size A__ : str =num_hidden_layers A__ : Any =num_attention_heads A__ : str =hidden_act A__ : Optional[int] =intermediate_size A__ : str =hidden_dropout_prob A__ : str =attention_probs_dropout_prob A__ : List[Any] =max_position_embeddings A__ : List[Any] =type_vocab_size A__ : Tuple =initializer_range A__ : Any =layer_norm_eps A__ : Any =position_embedding_type A__ : Union[str, Any] =use_cache
656
0