Dataset schema (the fields of each row appear below in this column order):

    code                      string   length 87–55.2k
    code_codestyle            int64    0–349
    style_context             string   length 135–49.1k
    style_context_codestyle   int64    0–349
    label                     int64    0–1
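The rows that follow are raw dumps of these fields. As a quick way to work with data shaped like this, here is a minimal sketch using the Hugging Face datasets library; "rows.parquet" is a placeholder path (the dump does not name the dataset), and reading label as a 0/1 flag relating the two style IDs is an assumption, not something stated here.

from datasets import load_dataset

# Minimal sketch, assuming the rows are available as a local parquet file.
# "rows.parquet" is a placeholder; the actual dataset name is not given in this dump.
ds = load_dataset("parquet", data_files="rows.parquet", split="train")

for row in ds.select(range(3)):
    # Each row carries two code snippets plus their style IDs and a 0/1 label.
    print(len(row["code"]), row["code_codestyle"])
    print(len(row["style_context"]), row["style_context_codestyle"])
    print("label:", row["label"])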
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..utils import cached_file # docstyle-ignore __a = '\nHuman: <<task>>\n\nAssistant: ' __a = 'huggingface-tools/default-prompts' __a = {'chat': 'chat_prompt_template.txt', 'run': 'run_prompt_template.txt'} def a ( snake_case__: Union[str, Any] , snake_case__: str , snake_case__: Tuple="run" ): '''simple docstring''' if prompt_or_repo_id is None: lowercase_ = DEFAULT_PROMPTS_REPO # prompt is considered a repo ID when it does not contain any kind of space if re.search('''\\s''' , snake_case__ ) is not None: return prompt_or_repo_id lowercase_ = cached_file( snake_case__ , PROMPT_FILES[mode] , repo_type='''dataset''' , user_agent={'''agent''': agent_name} ) with open(snake_case__ , '''r''' , encoding='''utf-8''' ) as f: return f.read()
code_codestyle: 30
from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig __a = logging.get_logger(__name__) # General docstring __a = 'RegNetConfig' # Base docstring __a = 'facebook/regnet-y-040' __a = [1, 1_0_8_8, 7, 7] # Image classification docstring __a = 'facebook/regnet-y-040' __a = 'tabby, tabby cat' __a = [ 'facebook/regnet-y-040', # See all regnet models at https://huggingface.co/models?filter=regnet ] class lowercase__( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 3 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : Optional[str] = "relu" , **SCREAMING_SNAKE_CASE_ : Any , ) -> List[str]: super().__init__(**SCREAMING_SNAKE_CASE_ ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb lowercase_ = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) lowercase_ = tf.keras.layers.ConvaD( filters=SCREAMING_SNAKE_CASE_ , kernel_size=SCREAMING_SNAKE_CASE_ , strides=SCREAMING_SNAKE_CASE_ , padding='''VALID''' , groups=SCREAMING_SNAKE_CASE_ , use_bias=SCREAMING_SNAKE_CASE_ , name='''convolution''' , ) lowercase_ = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' ) lowercase_ = ACTaFN[activation] if activation is not None else tf.identity def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any: lowercase_ = self.convolution(self.padding(SCREAMING_SNAKE_CASE_ ) ) lowercase_ = self.normalization(SCREAMING_SNAKE_CASE_ ) lowercase_ = self.activation(SCREAMING_SNAKE_CASE_ ) return hidden_state class lowercase__( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : str , SCREAMING_SNAKE_CASE_ : RegNetConfig , **SCREAMING_SNAKE_CASE_ : str ) -> Any: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = config.num_channels lowercase_ = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , ) def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : str ) -> Optional[int]: lowercase_ = shape_list(SCREAMING_SNAKE_CASE_ )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) lowercase_ = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 2, 3, 1) ) lowercase_ = self.embedder(SCREAMING_SNAKE_CASE_ ) return hidden_state class lowercase__( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 2 , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Union[str, Any]: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = tf.keras.layers.ConvaD( filters=SCREAMING_SNAKE_CASE_ , kernel_size=1 , strides=SCREAMING_SNAKE_CASE_ , use_bias=SCREAMING_SNAKE_CASE_ , name='''convolution''' ) lowercase_ = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' ) def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : bool = False ) -> tf.Tensor: return self.normalization(self.convolution(SCREAMING_SNAKE_CASE_ ) , training=SCREAMING_SNAKE_CASE_ ) class lowercase__( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : int ) -> Optional[Any]: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=SCREAMING_SNAKE_CASE_ , name='''pooler''' ) lowercase_ = [ tf.keras.layers.ConvaD(filters=SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation='''relu''' , name='''attention.0''' ), tf.keras.layers.ConvaD(filters=SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ), ] def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Dict: # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels] lowercase_ = self.pooler(SCREAMING_SNAKE_CASE_ ) for layer_module in self.attention: lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ ) lowercase_ = hidden_state * pooled return hidden_state class lowercase__( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : str , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 1 , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> List[str]: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = in_channels != out_channels or stride != 1 lowercase_ = max(1 , out_channels // config.groups_width ) lowercase_ = ( TFRegNetShortCut(SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name='''shortcut''' ) if should_apply_shortcut else tf.keras.layers.Activation('''linear''' , name='''shortcut''' ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
lowercase_ = [ TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ), TFRegNetConvLayer( SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , groups=SCREAMING_SNAKE_CASE_ , activation=config.hidden_act , name='''layer.1''' ), TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE_ , name='''layer.2''' ), ] lowercase_ = ACTaFN[config.hidden_act] def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any: lowercase_ = hidden_state for layer_module in self.layers: lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ ) lowercase_ = self.shortcut(SCREAMING_SNAKE_CASE_ ) hidden_state += residual lowercase_ = self.activation(SCREAMING_SNAKE_CASE_ ) return hidden_state class lowercase__( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : Any , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 1 , **SCREAMING_SNAKE_CASE_ : List[str] ) -> int: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = in_channels != out_channels or stride != 1 lowercase_ = max(1 , out_channels // config.groups_width ) lowercase_ = ( TFRegNetShortCut(SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name='''shortcut''' ) if should_apply_shortcut else tf.keras.layers.Activation('''linear''' , name='''shortcut''' ) ) lowercase_ = [ TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ), TFRegNetConvLayer( SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , groups=SCREAMING_SNAKE_CASE_ , activation=config.hidden_act , name='''layer.1''' ), TFRegNetSELayer(SCREAMING_SNAKE_CASE_ , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ), TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE_ , name='''layer.3''' ), ] lowercase_ = ACTaFN[config.hidden_act] def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : Dict ) -> Optional[Any]: lowercase_ = hidden_state for layer_module in self.layers: lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ ) lowercase_ = self.shortcut(SCREAMING_SNAKE_CASE_ ) hidden_state += residual lowercase_ = self.activation(SCREAMING_SNAKE_CASE_ ) return hidden_state class lowercase__( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : str , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 2 , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> List[str]: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer lowercase_ = [ # downsampling is done in the first layer with stride of 2 layer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name='''layers.0''' ), *[layer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , name=f'''layers.{i+1}''' ) for i in range(depth - 1 )], ] def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : int ) -> int: for layer_module in self.layers: lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ ) return hidden_state class lowercase__( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : RegNetConfig , **SCREAMING_SNAKE_CASE_ : Dict ) -> Tuple: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = [] # based on `downsample_in_first_stage`, the 
first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( SCREAMING_SNAKE_CASE_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) ) lowercase_ = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(SCREAMING_SNAKE_CASE_ , config.depths[1:] ) ): self.stages.append(TFRegNetStage(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , depth=SCREAMING_SNAKE_CASE_ , name=f'''stages.{i+1}''' ) ) def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : bool = True ) -> TFBaseModelOutputWithNoAttention: lowercase_ = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: lowercase_ = hidden_states + (hidden_state,) lowercase_ = stage_module(SCREAMING_SNAKE_CASE_ ) if output_hidden_states: lowercase_ = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=SCREAMING_SNAKE_CASE_ , hidden_states=SCREAMING_SNAKE_CASE_ ) @keras_serializable class lowercase__( tf.keras.layers.Layer ): """simple docstring""" a :str = RegNetConfig def __init__( self : str , SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Any: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = config lowercase_ = TFRegNetEmbeddings(SCREAMING_SNAKE_CASE_ , name='''embedder''' ) lowercase_ = TFRegNetEncoder(SCREAMING_SNAKE_CASE_ , name='''encoder''' ) lowercase_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=SCREAMING_SNAKE_CASE_ , name='''pooler''' ) @unpack_inputs def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : bool = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention: lowercase_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict lowercase_ = self.embedder(SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ ) lowercase_ = self.encoder( SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ ) lowercase_ = encoder_outputs[0] lowercase_ = self.pooler(SCREAMING_SNAKE_CASE_ ) # Change to NCHW output format have uniformity in the modules lowercase_ = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2) ) lowercase_ = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: lowercase_ = tuple([tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=SCREAMING_SNAKE_CASE_ , pooler_output=SCREAMING_SNAKE_CASE_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class lowercase__( UpperCAmelCase ): """simple docstring""" a :Tuple = RegNetConfig a :Any = 'regnet' a :List[str] = 'pixel_values' @property def _lowercase ( self : List[str] ) -> str: return {"pixel_values": tf.TensorSpec(shape=(None, 
self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )} __a = r'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n' __a = r'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( 'The bare RegNet model outputting raw features without any specific head on top.' , UpperCAmelCase , ) class lowercase__( UpperCAmelCase ): """simple docstring""" def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : RegNetConfig , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : str ) -> List[str]: super().__init__(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) lowercase_ = TFRegNetMainLayer(SCREAMING_SNAKE_CASE_ , name='''regnet''' ) @unpack_inputs @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[Any]=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]: lowercase_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict lowercase_ = self.regnet( pixel_values=SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( '\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n ' , UpperCAmelCase , ) class lowercase__( UpperCAmelCase , UpperCAmelCase ): """simple docstring""" def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : RegNetConfig , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : int ) -> Union[str, Any]: super().__init__(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) lowercase_ = config.num_labels lowercase_ = TFRegNetMainLayer(SCREAMING_SNAKE_CASE_ , name='''regnet''' ) # classification head lowercase_ = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : tf.Tensor = None , SCREAMING_SNAKE_CASE_ : tf.Tensor = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : Dict=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: lowercase_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict lowercase_ = self.regnet( SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ ) lowercase_ = outputs.pooler_output if return_dict else outputs[1] lowercase_ = self.classifier[0](SCREAMING_SNAKE_CASE_ ) lowercase_ = self.classifier[1](SCREAMING_SNAKE_CASE_ ) lowercase_ = None if labels is None else self.hf_compute_loss(labels=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_ ) if not return_dict: lowercase_ = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_ , hidden_states=outputs.hidden_states )
style_context_codestyle: 30
label: 1
import argparse import os from io import BytesIO from pathlib import Path import requests from clip_retrieval.clip_client import ClipClient from PIL import Image from tqdm import tqdm def a ( snake_case__: Dict , snake_case__: str , snake_case__: List[str] ): '''simple docstring''' lowercase_ = 1.5 lowercase_ = int(factor * num_class_images ) lowercase_ = ClipClient( url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=snake_case__ , aesthetic_weight=0.1 ) os.makedirs(F'''{class_data_dir}/images''' , exist_ok=snake_case__ ) if len(list(Path(F'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images: return while True: lowercase_ = client.query(text=snake_case__ ) if len(snake_case__ ) >= factor * num_class_images or num_images > 1e4: break else: lowercase_ = int(factor * num_images ) lowercase_ = ClipClient( url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=snake_case__ , aesthetic_weight=0.1 , ) lowercase_ = 0 lowercase_ = 0 lowercase_ = tqdm(desc='''downloading real regularization images''' , total=snake_case__ ) with open(F'''{class_data_dir}/caption.txt''' , '''w''' ) as fa, open(F'''{class_data_dir}/urls.txt''' , '''w''' ) as fa, open( F'''{class_data_dir}/images.txt''' , '''w''' ) as fa: while total < num_class_images: lowercase_ = class_images[count] count += 1 try: lowercase_ = requests.get(images['''url'''] ) if img.status_code == 200: lowercase_ = Image.open(BytesIO(img.content ) ) with open(F'''{class_data_dir}/images/{total}.jpg''' , '''wb''' ) as f: f.write(img.content ) fa.write(images['''caption'''] + '''\n''' ) fa.write(images['''url'''] + '''\n''' ) fa.write(F'''{class_data_dir}/images/{total}.jpg''' + '''\n''' ) total += 1 pbar.update(1 ) else: continue except Exception: continue return def a ( ): '''simple docstring''' lowercase_ = argparse.ArgumentParser('''''' , add_help=snake_case__ ) parser.add_argument('''--class_prompt''' , help='''text prompt to retrieve images''' , required=snake_case__ , type=snake_case__ ) parser.add_argument('''--class_data_dir''' , help='''path to save images''' , required=snake_case__ , type=snake_case__ ) parser.add_argument('''--num_class_images''' , help='''number of images to download''' , default=200 , type=snake_case__ ) return parser.parse_args() if __name__ == "__main__": __a = parse_args() retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
code_codestyle: 30
import importlib.metadata import warnings from copy import deepcopy from packaging import version from ..utils import logging from .import_utils import is_accelerate_available, is_bitsandbytes_available if is_bitsandbytes_available(): import bitsandbytes as bnb import torch import torch.nn as nn from ..pytorch_utils import ConvaD if is_accelerate_available(): from accelerate import init_empty_weights from accelerate.utils import find_tied_parameters __a = logging.get_logger(__name__) def a ( snake_case__: Optional[int] , snake_case__: Dict , snake_case__: int , snake_case__: List[str]=None , snake_case__: List[Any]=None ): '''simple docstring''' # Recurse if needed if "." in tensor_name: lowercase_ = tensor_name.split('''.''' ) for split in splits[:-1]: lowercase_ = getattr(snake_case__ , snake_case__ ) if new_module is None: raise ValueError(F'''{module} has no attribute {split}.''' ) lowercase_ = new_module lowercase_ = splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(F'''{module} does not have a parameter or a buffer named {tensor_name}.''' ) lowercase_ = tensor_name in module._buffers lowercase_ = getattr(snake_case__ , snake_case__ ) if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None: raise ValueError(F'''{tensor_name} is on the meta device, we need a `value` to put in on {device}.''' ) lowercase_ = False lowercase_ = False if is_buffer or not is_bitsandbytes_available(): lowercase_ = False lowercase_ = False else: lowercase_ = hasattr(bnb.nn , '''Params4bit''' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit ) lowercase_ = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams ) if is_abit or is_abit: lowercase_ = module._parameters[tensor_name] if param.device.type != "cuda": if value is None: lowercase_ = old_value.to(snake_case__ ) elif isinstance(snake_case__ , torch.Tensor ): lowercase_ = value.to('''cpu''' ) if value.dtype == torch.inta: lowercase_ = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse( '''0.37.2''' ) if not is_abit_serializable: raise ValueError( '''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. ''' '''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' ) else: lowercase_ = torch.tensor(snake_case__ , device='''cpu''' ) # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. 
if issubclass(module.source_cls , snake_case__ ) and fpaa_statistics is None: lowercase_ = new_value.T lowercase_ = old_value.__dict__ if is_abit: lowercase_ = bnb.nn.IntaParams(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ ) elif is_abit: lowercase_ = bnb.nn.Paramsabit(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ ) lowercase_ = new_value if fpaa_statistics is not None: setattr(module.weight , '''SCB''' , fpaa_statistics.to(snake_case__ ) ) else: if value is None: lowercase_ = old_value.to(snake_case__ ) elif isinstance(snake_case__ , torch.Tensor ): lowercase_ = value.to(snake_case__ ) else: lowercase_ = torch.tensor(snake_case__ , device=snake_case__ ) if is_buffer: lowercase_ = new_value else: lowercase_ = nn.Parameter(snake_case__ , requires_grad=old_value.requires_grad ) lowercase_ = new_value def a ( snake_case__: str , snake_case__: Union[str, Any]=None , snake_case__: Any=None , snake_case__: List[str]=None , snake_case__: Optional[Any]=False ): '''simple docstring''' for name, module in model.named_children(): if current_key_name is None: lowercase_ = [] current_key_name.append(snake_case__ ) if (isinstance(snake_case__ , nn.Linear ) or isinstance(snake_case__ , snake_case__ )) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` if not any(key in '''.'''.join(snake_case__ ) for key in modules_to_not_convert ): with init_empty_weights(): if isinstance(snake_case__ , snake_case__ ): lowercase_ , lowercase_ = module.weight.shape else: lowercase_ = module.in_features lowercase_ = module.out_features if quantization_config.quantization_method() == "llm_int8": lowercase_ = bnb.nn.LinearabitLt( snake_case__ , snake_case__ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , ) lowercase_ = True else: if ( quantization_config.llm_inta_skip_modules is not None and name in quantization_config.llm_inta_skip_modules ): pass else: lowercase_ = bnb.nn.Linearabit( snake_case__ , snake_case__ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , ) lowercase_ = True # Store the module class in case we need to transpose the weight later lowercase_ = type(snake_case__ ) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(snake_case__ ) if len(list(module.children() ) ) > 0: lowercase_ , lowercase_ = _replace_with_bnb_linear( snake_case__ , snake_case__ , snake_case__ , snake_case__ , has_been_replaced=snake_case__ , ) # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def a ( snake_case__: Any , snake_case__: Any=None , snake_case__: Union[str, Any]=None , snake_case__: str=None ): '''simple docstring''' lowercase_ = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert lowercase_ , lowercase_ = _replace_with_bnb_linear( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) if not has_been_replaced: logger.warning( '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.''' ''' Please double check your model architecture, or submit an issue on github if you think this is''' ''' a bug.''' ) return model def a ( *snake_case__: str , **snake_case__: Dict ): '''simple docstring''' warnings.warn( 
'''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' , snake_case__ , ) return replace_with_bnb_linear(*snake_case__ , **snake_case__ ) def a ( *snake_case__: Any , **snake_case__: List[Any] ): '''simple docstring''' warnings.warn( '''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' , snake_case__ , ) return set_module_quantized_tensor_to_device(*snake_case__ , **snake_case__ ) def a ( snake_case__: Optional[Any] ): '''simple docstring''' lowercase_ = deepcopy(snake_case__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_model.tie_weights() lowercase_ = find_tied_parameters(snake_case__ ) # For compatibility with Accelerate < 0.18 if isinstance(snake_case__ , snake_case__ ): lowercase_ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: lowercase_ = sum(snake_case__ , [] ) lowercase_ = len(snake_case__ ) > 0 # Check if it is a base model lowercase_ = not hasattr(snake_case__ , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head lowercase_ = list(model.named_children() ) lowercase_ = [list_modules[-1][0]] # add last module together with tied weights lowercase_ = set(snake_case__ ) - set(snake_case__ ) lowercase_ = list(set(snake_case__ ) ) + list(snake_case__ ) # remove ".weight" from the keys lowercase_ = ['''.weight''', '''.bias'''] lowercase_ = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: lowercase_ = name.replace(snake_case__ , '''''' ) filtered_module_names.append(snake_case__ ) return filtered_module_names
style_context_codestyle: 30
label: 1
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class lowercase__( unittest.TestCase ): """simple docstring""" def _lowercase ( self : Union[str, Any] ) -> List[Any]: # For consistency across different places the DisjunctiveConstraint is called, # dc.token_ids is a list of integers. It is also initialized only by integers. lowercase_ = [[1, 2, 4], [1, 2, 3, 4]] lowercase_ = DisjunctiveConstraint(SCREAMING_SNAKE_CASE_ ) self.assertTrue(isinstance(dc.token_ids , SCREAMING_SNAKE_CASE_ ) ) with self.assertRaises(SCREAMING_SNAKE_CASE_ ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(SCREAMING_SNAKE_CASE_ ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def _lowercase ( self : List[str] ) -> Optional[Any]: # We can't have constraints that are complete subsets of another. This leads to a preverse # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint? # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it). lowercase_ = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(SCREAMING_SNAKE_CASE_ ): DisjunctiveConstraint(SCREAMING_SNAKE_CASE_ ) # fails here def _lowercase ( self : Optional[Any] ) -> Any: lowercase_ = [[1, 2, 3], [1, 2, 4]] lowercase_ = DisjunctiveConstraint(SCREAMING_SNAKE_CASE_ ) lowercase_ , lowercase_ , lowercase_ = dc.update(1 ) lowercase_ = stepped is True and completed is False and reset is False self.assertTrue(SCREAMING_SNAKE_CASE_ ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) lowercase_ , lowercase_ , lowercase_ = dc.update(2 ) lowercase_ = stepped is True and completed is False and reset is False self.assertTrue(SCREAMING_SNAKE_CASE_ ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) lowercase_ , lowercase_ , lowercase_ = dc.update(3 ) lowercase_ = stepped is True and completed is True and reset is False self.assertTrue(SCREAMING_SNAKE_CASE_ ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def _lowercase ( self : Tuple ) -> int: lowercase_ = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] lowercase_ = DisjunctiveConstraint(SCREAMING_SNAKE_CASE_ ) lowercase_ , lowercase_ , lowercase_ = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) lowercase_ , lowercase_ , lowercase_ = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) lowercase_ , lowercase_ , lowercase_ = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) lowercase_ , lowercase_ , lowercase_ = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! 
self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() lowercase_ , lowercase_ , lowercase_ = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) lowercase_ , lowercase_ , lowercase_ = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) lowercase_ , lowercase_ , lowercase_ = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
code_codestyle: 30
import argparse import os import re __a = 'src/transformers/models/auto' # re pattern that matches mapping introductions: # SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict __a = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict') # re pattern that matches identifiers in mappings __a = re.compile(r'\s*\(\s*"(\S[^"]+)"') def a ( snake_case__: str , snake_case__: bool = False ): '''simple docstring''' with open(snake_case__ , '''r''' , encoding='''utf-8''' ) as f: lowercase_ = f.read() lowercase_ = content.split('''\n''' ) lowercase_ = [] lowercase_ = 0 while line_idx < len(snake_case__ ): if _re_intro_mapping.search(lines[line_idx] ) is not None: lowercase_ = len(re.search(r'''^(\s*)\S''' , lines[line_idx] ).groups()[0] ) + 8 # Start of a new mapping! while not lines[line_idx].startswith(''' ''' * indent + '''(''' ): new_lines.append(lines[line_idx] ) line_idx += 1 lowercase_ = [] while lines[line_idx].strip() != "]": # Blocks either fit in one line or not if lines[line_idx].strip() == "(": lowercase_ = line_idx while not lines[line_idx].startswith(''' ''' * indent + ''')''' ): line_idx += 1 blocks.append('''\n'''.join(lines[start_idx : line_idx + 1] ) ) else: blocks.append(lines[line_idx] ) line_idx += 1 # Sort blocks by their identifiers lowercase_ = sorted(snake_case__ , key=lambda snake_case__ : _re_identifier.search(snake_case__ ).groups()[0] ) new_lines += blocks else: new_lines.append(lines[line_idx] ) line_idx += 1 if overwrite: with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f: f.write('''\n'''.join(snake_case__ ) ) elif "\n".join(snake_case__ ) != content: return True def a ( snake_case__: bool = False ): '''simple docstring''' lowercase_ = [os.path.join(snake_case__ , snake_case__ ) for f in os.listdir(snake_case__ ) if f.endswith('''.py''' )] lowercase_ = [sort_auto_mapping(snake_case__ , overwrite=snake_case__ ) for fname in fnames] if not overwrite and any(snake_case__ ): lowercase_ = [f for f, d in zip(snake_case__ , snake_case__ ) if d] raise ValueError( F'''The following files have auto mappings that need sorting: {', '.join(snake_case__ )}. Run `make style` to fix''' ''' this.''' ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.') __a = parser.parse_args() sort_all_auto_mappings(not args.check_only)
style_context_codestyle: 30
label: 1
def a ( snake_case__: int ): '''simple docstring''' if upper_limit < 0: raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' ) lowercase_ = [0] * (upper_limit + 1) # Base case: C(0) = C(1) = 1 lowercase_ = 1 if upper_limit > 0: lowercase_ = 1 # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i for i in range(2 , upper_limit + 1 ): for j in range(snake_case__ ): catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1] return catalan_list if __name__ == "__main__": print('\n********* Catalan Numbers Using Dynamic Programming ************\n') print('\n*** Enter -1 at any time to quit ***') print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='') try: while True: __a = int(input().strip()) if N < 0: print('\n********* Goodbye!! ************') break else: print(f"The Catalan numbers from 0 through {N} are:") print(catalan_numbers(N)) print('Try another upper limit for the sequence: ', end='') except (NameError, ValueError): print('\n********* Invalid input, goodbye! ************\n') import doctest doctest.testmod()
code_codestyle: 30
def a ( snake_case__: list , snake_case__: list , snake_case__: int , snake_case__: int , snake_case__: int ): '''simple docstring''' if index == number_of_items: return 0 lowercase_ = 0 lowercase_ = 0 lowercase_ = knapsack(snake_case__ , snake_case__ , snake_case__ , snake_case__ , index + 1 ) if weights[index] <= max_weight: lowercase_ = values[index] + knapsack( snake_case__ , snake_case__ , snake_case__ , max_weight - weights[index] , index + 1 ) return max(snake_case__ , snake_case__ ) if __name__ == "__main__": import doctest doctest.testmod()
style_context_codestyle: 30
label: 1
from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING __a = logging.get_logger(__name__) @add_end_docstrings(UpperCAmelCase ) class lowercase__( UpperCAmelCase ): """simple docstring""" def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Optional[int]: super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) requires_backends(self , '''vision''' ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING ) def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None , SCREAMING_SNAKE_CASE_ : str=None , SCREAMING_SNAKE_CASE_ : List[str]=None ) -> List[Any]: lowercase_ = {} lowercase_ = {} if prompt is not None: lowercase_ = prompt if generate_kwargs is not None: lowercase_ = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: lowercase_ = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( '''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,''' ''' please use only one''' ) lowercase_ = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **SCREAMING_SNAKE_CASE_ : Dict ) -> Dict: return super().__call__(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : int=None ) -> Dict: lowercase_ = load_image(SCREAMING_SNAKE_CASE_ ) if prompt is not None: if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): raise ValueError( f'''Received an invalid text input, got - {type(SCREAMING_SNAKE_CASE_ )} - but expected a single string. 
''' '''Note also that one single text can be provided for conditional image to text generation.''' ) lowercase_ = self.model.config.model_type if model_type == "git": lowercase_ = self.image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors=self.framework ) lowercase_ = self.tokenizer(text=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ).input_ids lowercase_ = [self.tokenizer.cls_token_id] + input_ids lowercase_ = torch.tensor(SCREAMING_SNAKE_CASE_ ).unsqueeze(0 ) model_inputs.update({'''input_ids''': input_ids} ) elif model_type == "pix2struct": lowercase_ = self.image_processor(images=SCREAMING_SNAKE_CASE_ , header_text=SCREAMING_SNAKE_CASE_ , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation lowercase_ = self.image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors=self.framework ) lowercase_ = self.tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors=self.framework ) model_inputs.update(SCREAMING_SNAKE_CASE_ ) else: raise ValueError(f'''Model type {model_type} does not support conditional text generation''' ) else: lowercase_ = self.image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: lowercase_ = None return model_inputs def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[str]=None ) -> Tuple: # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first. if ( "input_ids" in model_inputs and isinstance(model_inputs['''input_ids'''] , SCREAMING_SNAKE_CASE_ ) and all(x is None for x in model_inputs['''input_ids'''] ) ): lowercase_ = None if generate_kwargs is None: lowercase_ = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. lowercase_ = model_inputs.pop(self.model.main_input_name ) lowercase_ = self.model.generate(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) return model_outputs def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any: lowercase_ = [] for output_ids in model_outputs: lowercase_ = { '''generated_text''': self.tokenizer.decode( SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ , ) } records.append(SCREAMING_SNAKE_CASE_ ) return records
code_codestyle: 30
import argparse from collections import defaultdict import yaml __a = 'docs/source/en/_toctree.yml' def a ( snake_case__: Dict ): '''simple docstring''' lowercase_ = defaultdict(snake_case__ ) for doc in model_doc: counts[doc["local"]] += 1 lowercase_ = [key for key, value in counts.items() if value > 1] lowercase_ = [] for duplicate_key in duplicates: lowercase_ = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} ) if len(snake_case__ ) > 1: raise ValueError( F'''{duplicate_key} is present several times in the documentation table of content at ''' '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the ''' '''others.''' ) # Only add this once new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] ) # Sort return sorted(snake_case__ , key=lambda snake_case__ : s["title"].lower() ) def a ( snake_case__: List[Any]=False ): '''simple docstring''' with open(snake_case__ , encoding='''utf-8''' ) as f: lowercase_ = yaml.safe_load(f.read() ) # Get to the API doc lowercase_ = 0 while content[api_idx]["title"] != "API": api_idx += 1 lowercase_ = content[api_idx]['''sections'''] # Then to the model doc lowercase_ = 0 while api_doc[model_idx]["title"] != "Models": model_idx += 1 lowercase_ = api_doc[model_idx]['''sections'''] lowercase_ = [(idx, section) for idx, section in enumerate(snake_case__ ) if '''sections''' in section] lowercase_ = False for idx, modality_doc in modalities_docs: lowercase_ = modality_doc['''sections'''] lowercase_ = clean_model_doc_toc(snake_case__ ) if old_modality_doc != new_modality_doc: lowercase_ = True if overwrite: lowercase_ = new_modality_doc if diff: if overwrite: lowercase_ = model_doc lowercase_ = api_doc with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f: f.write(yaml.dump(snake_case__ , allow_unicode=snake_case__ ) ) else: raise ValueError( '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') __a = parser.parse_args() check_model_doc(args.fix_and_overwrite)
style_context_codestyle: 30
label: 1
import argparse from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument( '--txt2img_unclip', default='kakaobrain/karlo-v1-alpha', type=str, required=False, help='The pretrained txt2img unclip.', ) __a = parser.parse_args() __a = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip) __a = CLIPImageProcessor() __a = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14') __a = UnCLIPImageVariationPipeline( decoder=txtaimg.decoder, text_encoder=txtaimg.text_encoder, tokenizer=txtaimg.tokenizer, text_proj=txtaimg.text_proj, feature_extractor=feature_extractor, image_encoder=image_encoder, super_res_first=txtaimg.super_res_first, super_res_last=txtaimg.super_res_last, decoder_scheduler=txtaimg.decoder_scheduler, super_res_scheduler=txtaimg.super_res_scheduler, ) imgaimg.save_pretrained(args.dump_path)
code_codestyle: 30
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING __a = logging.get_logger(__name__) class lowercase__( UpperCAmelCase ): """simple docstring""" a :Union[str, Any] = 'upernet' def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : Dict=None , SCREAMING_SNAKE_CASE_ : str=5_1_2 , SCREAMING_SNAKE_CASE_ : Tuple=0.02 , SCREAMING_SNAKE_CASE_ : Optional[Any]=[1, 2, 3, 6] , SCREAMING_SNAKE_CASE_ : Optional[int]=True , SCREAMING_SNAKE_CASE_ : Tuple=0.4 , SCREAMING_SNAKE_CASE_ : Optional[int]=3_8_4 , SCREAMING_SNAKE_CASE_ : str=2_5_6 , SCREAMING_SNAKE_CASE_ : Dict=1 , SCREAMING_SNAKE_CASE_ : Tuple=False , SCREAMING_SNAKE_CASE_ : str=2_5_5 , **SCREAMING_SNAKE_CASE_ : str , ) -> int: super().__init__(**SCREAMING_SNAKE_CASE_ ) if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) lowercase_ = CONFIG_MAPPING['''resnet'''](out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] ) elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase_ = backbone_config.get('''model_type''' ) lowercase_ = CONFIG_MAPPING[backbone_model_type] lowercase_ = config_class.from_dict(SCREAMING_SNAKE_CASE_ ) lowercase_ = backbone_config lowercase_ = hidden_size lowercase_ = initializer_range lowercase_ = pool_scales lowercase_ = use_auxiliary_head lowercase_ = auxiliary_loss_weight lowercase_ = auxiliary_in_channels lowercase_ = auxiliary_channels lowercase_ = auxiliary_num_convs lowercase_ = auxiliary_concat_input lowercase_ = loss_ignore_index def _lowercase ( self : List[str] ) -> List[str]: lowercase_ = copy.deepcopy(self.__dict__ ) lowercase_ = self.backbone_config.to_dict() lowercase_ = self.__class__.model_type return output
style_context_codestyle: 30
label: 1
import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class lowercase__( unittest.TestCase ): """simple docstring""" def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] ) -> str: lowercase_ = 3 lowercase_ = 2_5_0 lowercase_ = ids_tensor((batch_size, length) , SCREAMING_SNAKE_CASE_ ) lowercase_ = torch.ones((batch_size, length) , device=SCREAMING_SNAKE_CASE_ , dtype=torch.float ) / length return input_ids, scores def _lowercase ( self : Optional[Any] ) -> Any: lowercase_ , lowercase_ = self._get_tensors(5 ) lowercase_ = StoppingCriteriaList( [ MaxLengthCriteria(max_length=1_0 ), MaxTimeCriteria(max_time=0.1 ), ] ) self.assertFalse(criteria(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) lowercase_ , lowercase_ = self._get_tensors(9 ) self.assertFalse(criteria(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) lowercase_ , lowercase_ = self._get_tensors(1_0 ) self.assertTrue(criteria(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) def _lowercase ( self : str ) -> Optional[int]: lowercase_ = MaxLengthCriteria(max_length=1_0 ) lowercase_ , lowercase_ = self._get_tensors(5 ) self.assertFalse(criteria(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) lowercase_ , lowercase_ = self._get_tensors(9 ) self.assertFalse(criteria(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) lowercase_ , lowercase_ = self._get_tensors(1_0 ) self.assertTrue(criteria(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) def _lowercase ( self : Dict ) -> Optional[int]: lowercase_ = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 ) lowercase_ , lowercase_ = self._get_tensors(5 ) self.assertFalse(criteria(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) lowercase_ , lowercase_ = self._get_tensors(9 ) self.assertFalse(criteria(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) lowercase_ , lowercase_ = self._get_tensors(1_0 ) self.assertTrue(criteria(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) lowercase_ = StoppingCriteriaList([criteria] ) self.assertEqual(criteria_list.max_length , 1_0 ) def _lowercase ( self : str ) -> Union[str, Any]: lowercase_ , lowercase_ = self._get_tensors(5 ) lowercase_ = MaxTimeCriteria(max_time=0.1 ) self.assertFalse(criteria(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) lowercase_ = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 ) self.assertTrue(criteria(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) def _lowercase ( self : Optional[Any] ) -> Union[str, Any]: validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_0 ) with self.assertWarns(SCREAMING_SNAKE_CASE_ ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_1 ) lowercase_ = validate_stopping_criteria(StoppingCriteriaList() , 1_1 ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 1 )
code_codestyle: 30
import tempfile import unittest from make_student import create_student_by_copying_alternating_layers from transformers import AutoConfig from transformers.file_utils import cached_property from transformers.testing_utils import require_torch __a = 'sshleifer/bart-tiny-random' __a = 'patrickvonplaten/t5-tiny-random' @require_torch class lowercase__( unittest.TestCase ): """simple docstring""" @cached_property def _lowercase ( self : Any ) -> Tuple: return AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : int ) -> List[Any]: lowercase_ , *lowercase_ = create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.num_hidden_layers , 1 ) def _lowercase ( self : Union[str, Any] ) -> Optional[int]: lowercase_ , *lowercase_ = create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=1 , d=SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : List[str] ) -> List[Any]: lowercase_ , *lowercase_ = create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=1 , d=SCREAMING_SNAKE_CASE_ ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers ) def _lowercase ( self : List[Any] ) -> List[Any]: lowercase_ , *lowercase_ = create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , 1 ) def _lowercase ( self : Union[str, Any] ) -> Optional[int]: with self.assertRaises(SCREAMING_SNAKE_CASE_ ): create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=SCREAMING_SNAKE_CASE_ , d=SCREAMING_SNAKE_CASE_ )
style_context_codestyle: 30
label: 1
import argparse import datetime def a ( snake_case__: str ): '''simple docstring''' lowercase_ = { '''0''': '''Sunday''', '''1''': '''Monday''', '''2''': '''Tuesday''', '''3''': '''Wednesday''', '''4''': '''Thursday''', '''5''': '''Friday''', '''6''': '''Saturday''', } lowercase_ = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0} # Validate if not 0 < len(snake_case__ ) < 11: raise ValueError('''Must be 10 characters long''' ) # Get month lowercase_ = int(date_input[0] + date_input[1] ) # Validate if not 0 < m < 13: raise ValueError('''Month must be between 1 - 12''' ) lowercase_ = date_input[2] # Validate if sep_a not in ["-", "/"]: raise ValueError('''Date separator must be \'-\' or \'/\'''' ) # Get day lowercase_ = int(date_input[3] + date_input[4] ) # Validate if not 0 < d < 32: raise ValueError('''Date must be between 1 - 31''' ) # Get second separator lowercase_ = date_input[5] # Validate if sep_a not in ["-", "/"]: raise ValueError('''Date separator must be \'-\' or \'/\'''' ) # Get year lowercase_ = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] ) # Arbitrary year range if not 45 < y < 8_500: raise ValueError( '''Year out of range. There has to be some sort of limit...right?''' ) # Get datetime obj for validation lowercase_ = datetime.date(int(snake_case__ ) , int(snake_case__ ) , int(snake_case__ ) ) # Start math if m <= 2: lowercase_ = y - 1 lowercase_ = m + 12 # maths var lowercase_ = int(str(snake_case__ )[:2] ) lowercase_ = int(str(snake_case__ )[2:] ) lowercase_ = int(2.6 * m - 5.3_9 ) lowercase_ = int(c / 4 ) lowercase_ = int(k / 4 ) lowercase_ = int(d + k ) lowercase_ = int(t + u + v + x ) lowercase_ = int(z - (2 * c) ) lowercase_ = round(w % 7 ) # End math # Validate math if f != convert_datetime_days[dt_ck.weekday()]: raise AssertionError('''The date was evaluated incorrectly. Contact developer.''' ) # Response lowercase_ = F'''Your date {date_input}, is a {days[str(snake_case__ )]}!''' return response if __name__ == "__main__": import doctest doctest.testmod() __a = argparse.ArgumentParser( description=( 'Find out what day of the week nearly any date is or was. Enter ' 'date as a string in the mm-dd-yyyy or mm/dd/yyyy format' ) ) parser.add_argument( 'date_input', type=str, help='Date as a string (mm-dd-yyyy or mm/dd/yyyy)' ) __a = parser.parse_args() zeller(args.date_input)
code_codestyle: 30
def a ( snake_case__: int = 100 ): '''simple docstring''' lowercase_ = (n * (n + 1) // 2) ** 2 lowercase_ = n * (n + 1) * (2 * n + 1) // 6 return sum_cubes - sum_squares if __name__ == "__main__": print(f"{solution() = }")
style_context_codestyle: 30
label: 1
import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import VideoMAEConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEModel, ) from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class lowercase__: """simple docstring""" def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[str]=1_3 , SCREAMING_SNAKE_CASE_ : Optional[Any]=1_0 , SCREAMING_SNAKE_CASE_ : str=3 , SCREAMING_SNAKE_CASE_ : List[str]=2 , SCREAMING_SNAKE_CASE_ : Optional[int]=2 , SCREAMING_SNAKE_CASE_ : Tuple=2 , SCREAMING_SNAKE_CASE_ : List[Any]=True , SCREAMING_SNAKE_CASE_ : Dict=True , SCREAMING_SNAKE_CASE_ : Any=3_2 , SCREAMING_SNAKE_CASE_ : str=5 , SCREAMING_SNAKE_CASE_ : Tuple=4 , SCREAMING_SNAKE_CASE_ : List[str]=3_7 , SCREAMING_SNAKE_CASE_ : Union[str, Any]="gelu" , SCREAMING_SNAKE_CASE_ : Dict=0.1 , SCREAMING_SNAKE_CASE_ : str=0.1 , SCREAMING_SNAKE_CASE_ : List[str]=1_0 , SCREAMING_SNAKE_CASE_ : Any=0.02 , SCREAMING_SNAKE_CASE_ : str=0.9 , SCREAMING_SNAKE_CASE_ : Optional[int]=None , ) -> Any: lowercase_ = parent lowercase_ = batch_size lowercase_ = image_size lowercase_ = num_channels lowercase_ = patch_size lowercase_ = tubelet_size lowercase_ = num_frames lowercase_ = is_training lowercase_ = use_labels lowercase_ = hidden_size lowercase_ = num_hidden_layers lowercase_ = num_attention_heads lowercase_ = intermediate_size lowercase_ = hidden_act lowercase_ = hidden_dropout_prob lowercase_ = attention_probs_dropout_prob lowercase_ = type_sequence_label_size lowercase_ = initializer_range lowercase_ = mask_ratio lowercase_ = scope # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame lowercase_ = (image_size // patch_size) ** 2 lowercase_ = (num_frames // tubelet_size) * self.num_patches_per_frame # use this variable to define bool_masked_pos lowercase_ = int(mask_ratio * self.seq_length ) def _lowercase ( self : str ) -> int: lowercase_ = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) lowercase_ = None if self.use_labels: lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase_ = self.get_config() return config, pixel_values, labels def _lowercase ( self : Any ) -> Tuple: return VideoMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , 
initializer_range=self.initializer_range , ) def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[Any] ) -> str: lowercase_ = VideoMAEModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() lowercase_ = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Tuple: lowercase_ = VideoMAEForPreTraining(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch lowercase_ = torch.ones((self.num_masks,) ) lowercase_ = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] ) lowercase_ = mask.expand(self.batch_size , -1 ).bool() lowercase_ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # model only returns predictions for masked patches lowercase_ = mask.sum().item() lowercase_ = 3 * self.tubelet_size * self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) ) def _lowercase ( self : str ) -> Any: lowercase_ = self.prepare_config_and_inputs() lowercase_ , lowercase_ , lowercase_ = config_and_inputs lowercase_ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class lowercase__( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ): """simple docstring""" a :Optional[int] = ( (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else () ) a :Optional[int] = ( {'feature-extraction': VideoMAEModel, 'video-classification': VideoMAEForVideoClassification} if is_torch_available() else {} ) a :Optional[int] = False a :int = False a :str = False a :int = False def _lowercase ( self : int ) -> Optional[int]: lowercase_ = VideoMAEModelTester(self ) lowercase_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=3_7 ) def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Any=False ) -> str: lowercase_ = copy.deepcopy(SCREAMING_SNAKE_CASE_ ) if model_class == VideoMAEForPreTraining: # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch lowercase_ = torch.ones((self.model_tester.num_masks,) ) lowercase_ = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] ) lowercase_ = mask.expand(self.model_tester.batch_size , -1 ).bool() lowercase_ = bool_masked_pos.to(SCREAMING_SNAKE_CASE_ ) if return_labels: if model_class in [ *get_values(SCREAMING_SNAKE_CASE_ ), ]: lowercase_ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ ) return inputs_dict def _lowercase ( self : Optional[int] ) -> Optional[int]: self.config_tester.run_common_tests() @unittest.skip(reason='''VideoMAE does not use inputs_embeds''' ) def _lowercase ( self : Optional[Any] ) -> Union[str, Any]: pass def _lowercase ( self : List[str] ) -> List[str]: lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ = 
model_class(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase_ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) ) def _lowercase ( self : Optional[Any] ) -> Tuple: lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ = model_class(SCREAMING_SNAKE_CASE_ ) lowercase_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase_ = [*signature.parameters.keys()] lowercase_ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : int ) -> List[Any]: lowercase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Any ) -> Dict: lowercase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE_ ) @slow def _lowercase ( self : Any ) -> Optional[Any]: for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ = VideoMAEModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Optional[Any] ) -> List[Any]: if not self.has_attentions: pass else: lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() lowercase_ = True for model_class in self.all_model_classes: lowercase_ = self.model_tester.seq_length - self.model_tester.num_masks lowercase_ = ( num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length ) lowercase_ = True lowercase_ = False lowercase_ = True lowercase_ = model_class(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() with torch.no_grad(): lowercase_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) lowercase_ = outputs.attentions self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowercase_ = True lowercase_ = model_class(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() with torch.no_grad(): lowercase_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) lowercase_ = outputs.attentions self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) lowercase_ = len(SCREAMING_SNAKE_CASE_ ) # Check attention is always last and order is fine lowercase_ = True lowercase_ = True lowercase_ = model_class(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() with torch.no_grad(): lowercase_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) self.assertEqual(out_len + 1 , len(SCREAMING_SNAKE_CASE_ ) ) lowercase_ = outputs.attentions self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def _lowercase ( self : List[str] ) -> List[str]: def check_hidden_states_output(SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str ): lowercase_ = model_class(SCREAMING_SNAKE_CASE_ ) 
model.to(SCREAMING_SNAKE_CASE_ ) model.eval() with torch.no_grad(): lowercase_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) lowercase_ = outputs.hidden_states lowercase_ = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) lowercase_ = self.model_tester.seq_length - self.model_tester.num_masks lowercase_ = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ = True check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase_ = True check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def _lowercase ( self : Tuple ) -> List[Any]: pass def a ( ): '''simple docstring''' lowercase_ = hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' ) lowercase_ = np.load(snake_case__ ) return list(snake_case__ ) @require_torch @require_vision class lowercase__( unittest.TestCase ): """simple docstring""" @cached_property def _lowercase ( self : List[str] ) -> Optional[int]: # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def _lowercase ( self : str ) -> int: lowercase_ = VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''' ).to( SCREAMING_SNAKE_CASE_ ) lowercase_ = self.default_image_processor lowercase_ = prepare_video() lowercase_ = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ ) # forward pass with torch.no_grad(): lowercase_ = model(**SCREAMING_SNAKE_CASE_ ) # verify the logits lowercase_ = torch.Size((1, 4_0_0) ) self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ ) lowercase_ = torch.tensor([0.36_69, -0.06_88, -0.24_21] ).to(SCREAMING_SNAKE_CASE_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) ) @slow def _lowercase ( self : str ) -> Dict: lowercase_ = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ).to(SCREAMING_SNAKE_CASE_ ) lowercase_ = self.default_image_processor lowercase_ = prepare_video() lowercase_ = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ ) # add boolean mask, indicating which patches to mask lowercase_ = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' ) lowercase_ = torch.load(SCREAMING_SNAKE_CASE_ ) # forward pass with torch.no_grad(): lowercase_ = model(**SCREAMING_SNAKE_CASE_ ) # verify the logits lowercase_ = torch.Size([1, 1_4_0_8, 1_5_3_6] ) lowercase_ = torch.tensor( [[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] , device=SCREAMING_SNAKE_CASE_ ) self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , 
SCREAMING_SNAKE_CASE_ , atol=1e-4 ) ) # verify the loss (`config.norm_pix_loss` = `True`) lowercase_ = torch.tensor([0.51_42] , device=SCREAMING_SNAKE_CASE_ ) self.assertTrue(torch.allclose(outputs.loss , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) ) # verify the loss (`config.norm_pix_loss` = `False`) lowercase_ = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' , norm_pix_loss=SCREAMING_SNAKE_CASE_ ).to( SCREAMING_SNAKE_CASE_ ) with torch.no_grad(): lowercase_ = model(**SCREAMING_SNAKE_CASE_ ) lowercase_ = torch.tensor(torch.tensor([0.64_69] ) , device=SCREAMING_SNAKE_CASE_ ) self.assertTrue(torch.allclose(outputs.loss , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
import logging

from transformers.configuration_utils import PretrainedConfig


logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """Configuration for Masked BERT: the standard BERT hyper-parameters plus
    the pruning/masking options used by movement pruning."""

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
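# Illustrative usage of the configuration defined above (not part of the original
# module): build a config with explicit pruning settings and serialize it. The
# attribute names follow the constructor above; `to_dict()` is inherited from
# `PretrainedConfig`.
example_config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
example_dict = example_config.to_dict()
print(example_dict["pruning_method"], example_dict["hidden_size"])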
import argparse from typing import List import evaluate import numpy as np import torch from datasets import DatasetDict, load_dataset # New Code # # We'll be using StratifiedKFold for this example from sklearn.model_selection import StratifiedKFold from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to perform Cross Validation, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __a = 1_6 __a = 3_2 def a ( snake_case__: Accelerator , snake_case__: DatasetDict , snake_case__: List[int] , snake_case__: List[int] , snake_case__: int = 16 ): '''simple docstring''' lowercase_ = AutoTokenizer.from_pretrained('''bert-base-cased''' ) lowercase_ = DatasetDict( { '''train''': dataset['''train'''].select(snake_case__ ), '''validation''': dataset['''train'''].select(snake_case__ ), '''test''': dataset['''validation'''], } ) def tokenize_function(snake_case__: Tuple ): # max_length=None => use the model max length (it's actually the default) lowercase_ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=snake_case__ , max_length=snake_case__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowercase_ = datasets.map( snake_case__ , batched=snake_case__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowercase_ = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(snake_case__: int ): # On TPU it's best to pad everything to the same length or training will be very slow. lowercase_ = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowercase_ = 16 elif accelerator.mixed_precision != "no": lowercase_ = 8 else: lowercase_ = None return tokenizer.pad( snake_case__ , padding='''longest''' , max_length=snake_case__ , pad_to_multiple_of=snake_case__ , return_tensors='''pt''' , ) # Instantiate dataloaders. 
lowercase_ = DataLoader( tokenized_datasets['''train'''] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ ) lowercase_ = DataLoader( tokenized_datasets['''validation'''] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ ) lowercase_ = DataLoader( tokenized_datasets['''test'''] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ ) return train_dataloader, eval_dataloader, test_dataloader def a ( snake_case__: Optional[Any] , snake_case__: List[Any] ): '''simple docstring''' # New Code # lowercase_ = [] # Download the dataset lowercase_ = load_dataset('''glue''' , '''mrpc''' ) # Create our splits lowercase_ = StratifiedKFold(n_splits=int(args.num_folds ) ) # Initialize accelerator lowercase_ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowercase_ = config['''lr'''] lowercase_ = int(config['''num_epochs'''] ) lowercase_ = int(config['''seed'''] ) lowercase_ = int(config['''batch_size'''] ) lowercase_ = evaluate.load('''glue''' , '''mrpc''' ) # If the batch size is too big we use gradient accumulation lowercase_ = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: lowercase_ = batch_size // MAX_GPU_BATCH_SIZE lowercase_ = MAX_GPU_BATCH_SIZE set_seed(snake_case__ ) # New Code # # Create our folds: lowercase_ = kfold.split(np.zeros(datasets['''train'''].num_rows ) , datasets['''train''']['''label'''] ) lowercase_ = [] # Iterate over them for i, (train_idxs, valid_idxs) in enumerate(snake_case__ ): lowercase_ , lowercase_ , lowercase_ = get_fold_dataloaders( snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowercase_ = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=snake_case__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowercase_ = model.to(accelerator.device ) # Instantiate optimizer lowercase_ = AdamW(params=model.parameters() , lr=snake_case__ ) # Instantiate scheduler lowercase_ = get_linear_schedule_with_warmup( optimizer=snake_case__ , num_warmup_steps=100 , num_training_steps=(len(snake_case__ ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = accelerator.prepare( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # Now we train the model for epoch in range(snake_case__ ): model.train() for step, batch in enumerate(snake_case__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) lowercase_ = model(**snake_case__ ) lowercase_ = outputs.loss lowercase_ = loss / gradient_accumulation_steps accelerator.backward(snake_case__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(snake_case__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): lowercase_ = model(**snake_case__ ) lowercase_ = outputs.logits.argmax(dim=-1 ) lowercase_ , lowercase_ = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=snake_case__ , references=snake_case__ , ) lowercase_ = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' , snake_case__ ) # New Code # # We also run predictions on the test set at the very end lowercase_ = [] for step, batch in enumerate(snake_case__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): lowercase_ = model(**snake_case__ ) lowercase_ = outputs.logits lowercase_ , lowercase_ = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) fold_predictions.append(predictions.cpu() ) if i == 0: # We need all of the test predictions test_references.append(references.cpu() ) # Use accelerator.print to print only on the main process. test_predictions.append(torch.cat(snake_case__ , dim=0 ) ) # We now need to release all our memory and get rid of the current model, optimizer, etc accelerator.free_memory() # New Code # # Finally we check the accuracy of our folded results: lowercase_ = torch.cat(snake_case__ , dim=0 ) lowercase_ = torch.stack(snake_case__ , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 ) lowercase_ = metric.compute(predictions=snake_case__ , references=snake_case__ ) accelerator.print('''Average test metrics from all folds:''' , snake_case__ ) def a ( ): '''simple docstring''' lowercase_ = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=snake_case__ , default=snake_case__ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) # New Code # parser.add_argument('''--num_folds''' , type=snake_case__ , default=3 , help='''The number of splits to perform across the dataset''' ) lowercase_ = parser.parse_args() lowercase_ = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(snake_case__ , snake_case__ ) if __name__ == "__main__": main()
import os


def solution() -> str:
    """Project Euler 13: first ten digits of the sum of the numbers in ``num.txt``."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import PIL

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)


@dataclass
class IFPipelineOutput(BaseOutput):
    """Output of the DeepFloyd IF pipelines: generated images plus optional
    per-image NSFW and watermark flags."""

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_if import IFPipeline
    from .pipeline_if_img2img import IFImg2ImgPipeline
    from .pipeline_if_img2img_superresolution import IFImg2ImgSuperResolutionPipeline
    from .pipeline_if_inpainting import IFInpaintingPipeline
    from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
    from .pipeline_if_superresolution import IFSuperResolutionPipeline
    from .safety_checker import IFSafetyChecker
    from .watermark import IFWatermarker
import os import zipfile import pytest from datasets.utils.extract import ( BzipaExtractor, Extractor, GzipExtractor, LzaExtractor, SevenZipExtractor, TarExtractor, XzExtractor, ZipExtractor, ZstdExtractor, ) from .utils import require_lza, require_pyazr, require_zstandard @pytest.mark.parametrize( '''compression_format, is_archive''' , [ ('''7z''', True), ('''bz2''', False), ('''gzip''', False), ('''lz4''', False), ('''tar''', True), ('''xz''', False), ('''zip''', True), ('''zstd''', False), ] , ) def a ( snake_case__: int , snake_case__: Tuple , snake_case__: Dict , snake_case__: Dict , snake_case__: List[Any] , snake_case__: int , snake_case__: List[Any] , snake_case__: Optional[int] , snake_case__: str , snake_case__: Union[str, Any] , snake_case__: List[str] , snake_case__: int , ): '''simple docstring''' lowercase_ = { '''7z''': (seven_zip_file, SevenZipExtractor), '''bz2''': (bza_file, BzipaExtractor), '''gzip''': (gz_file, GzipExtractor), '''lz4''': (lza_file, LzaExtractor), '''tar''': (tar_file, TarExtractor), '''xz''': (xz_file, XzExtractor), '''zip''': (zip_file, ZipExtractor), '''zstd''': (zstd_file, ZstdExtractor), } lowercase_ , lowercase_ = input_paths_and_base_extractors[compression_format] if input_path is None: lowercase_ = F'''for \'{compression_format}\' compression_format, ''' if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(snake_case__ ) assert base_extractor.is_extractable(snake_case__ ) lowercase_ = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''') base_extractor.extract(snake_case__ , snake_case__ ) if is_archive: assert output_path.is_dir() for file_path in output_path.iterdir(): assert file_path.name == text_file.name lowercase_ = file_path.read_text(encoding='''utf-8''' ) else: lowercase_ = output_path.read_text(encoding='''utf-8''' ) lowercase_ = text_file.read_text(encoding='''utf-8''' ) assert extracted_file_content == expected_file_content @pytest.mark.parametrize( '''compression_format, is_archive''' , [ ('''7z''', True), ('''bz2''', False), ('''gzip''', False), ('''lz4''', False), ('''tar''', True), ('''xz''', False), ('''zip''', True), ('''zstd''', False), ] , ) def a ( snake_case__: List[Any] , snake_case__: int , snake_case__: Optional[int] , snake_case__: Union[str, Any] , snake_case__: List[Any] , snake_case__: Tuple , snake_case__: Optional[int] , snake_case__: List[str] , snake_case__: Union[str, Any] , snake_case__: Tuple , snake_case__: int , snake_case__: Optional[int] , ): '''simple docstring''' lowercase_ = { '''7z''': seven_zip_file, '''bz2''': bza_file, '''gzip''': gz_file, '''lz4''': lza_file, '''tar''': tar_file, '''xz''': xz_file, '''zip''': zip_file, '''zstd''': zstd_file, } lowercase_ = input_paths[compression_format] if input_path is None: lowercase_ = F'''for \'{compression_format}\' compression_format, ''' if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(snake_case__ ) lowercase_ = Extractor.infer_extractor_format(snake_case__ ) assert extractor_format is not None lowercase_ = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''') Extractor.extract(snake_case__ , snake_case__ , snake_case__ ) if is_archive: assert output_path.is_dir() 
for file_path in output_path.iterdir(): assert file_path.name == text_file.name lowercase_ = file_path.read_text(encoding='''utf-8''' ) else: lowercase_ = output_path.read_text(encoding='''utf-8''' ) lowercase_ = text_file.read_text(encoding='''utf-8''' ) assert extracted_file_content == expected_file_content @pytest.fixture def a ( snake_case__: Union[str, Any] , snake_case__: List[Any] ): '''simple docstring''' import tarfile lowercase_ = tmp_path / '''data_dot_dot''' directory.mkdir() lowercase_ = directory / '''tar_file_with_dot_dot.tar''' with tarfile.TarFile(snake_case__ , '''w''' ) as f: f.add(snake_case__ , arcname=os.path.join('''..''' , text_file.name ) ) return path @pytest.fixture def a ( snake_case__: int ): '''simple docstring''' import tarfile lowercase_ = tmp_path / '''data_sym_link''' directory.mkdir() lowercase_ = directory / '''tar_file_with_sym_link.tar''' os.symlink('''..''' , directory / '''subdir''' , target_is_directory=snake_case__ ) with tarfile.TarFile(snake_case__ , '''w''' ) as f: f.add(str(directory / '''subdir''' ) , arcname='''subdir''' ) # str required by os.readlink on Windows and Python < 3.8 return path @pytest.mark.parametrize( '''insecure_tar_file, error_log''' , [('''tar_file_with_dot_dot''', '''illegal path'''), ('''tar_file_with_sym_link''', '''Symlink''')] , ) def a ( snake_case__: List[Any] , snake_case__: Optional[int] , snake_case__: List[str] , snake_case__: List[str] , snake_case__: int , snake_case__: Optional[Any] ): '''simple docstring''' lowercase_ = { '''tar_file_with_dot_dot''': tar_file_with_dot_dot, '''tar_file_with_sym_link''': tar_file_with_sym_link, } lowercase_ = insecure_tar_files[insecure_tar_file] lowercase_ = tmp_path / '''extracted''' TarExtractor.extract(snake_case__ , snake_case__ ) assert caplog.text for record in caplog.records: assert record.levelname == "ERROR" assert error_log in record.msg def a ( snake_case__: Optional[int] ): '''simple docstring''' # We should have less false positives than zipfile.is_zipfile # We do that by checking only the magic number lowercase_ = tmpdir / '''not_a_zip_file''' # From: https://github.com/python/cpython/pull/5053 lowercase_ = ( B'''\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00''' B'''\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I''' B'''DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07''' B'''\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82''' ) with not_a_zip_file.open('''wb''' ) as f: f.write(snake_case__ ) assert zipfile.is_zipfile(str(snake_case__ ) ) # is a false positive for `zipfile` assert not ZipExtractor.is_extractable(snake_case__ ) # but we're right
from typing import Dict, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract __a = logging.get_logger(__name__) def a ( snake_case__: str , snake_case__: str , snake_case__: Any ): '''simple docstring''' return [ int(1_000 * (box[0] / width) ), int(1_000 * (box[1] / height) ), int(1_000 * (box[2] / width) ), int(1_000 * (box[3] / height) ), ] def a ( snake_case__: np.ndarray , snake_case__: Optional[str] , snake_case__: Optional[str] = None ): '''simple docstring''' lowercase_ = tesseract_config if tesseract_config is not None else '''''' # apply OCR lowercase_ = to_pil_image(snake_case__ ) lowercase_ , lowercase_ = pil_image.size lowercase_ = pytesseract.image_to_data(snake_case__ , lang=snake_case__ , output_type='''dict''' , config=snake_case__ ) lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height'''] # filter empty words and corresponding coordinates lowercase_ = [idx for idx, word in enumerate(snake_case__ ) if not word.strip()] lowercase_ = [word for idx, word in enumerate(snake_case__ ) if idx not in irrelevant_indices] lowercase_ = [coord for idx, coord in enumerate(snake_case__ ) if idx not in irrelevant_indices] lowercase_ = [coord for idx, coord in enumerate(snake_case__ ) if idx not in irrelevant_indices] lowercase_ = [coord for idx, coord in enumerate(snake_case__ ) if idx not in irrelevant_indices] lowercase_ = [coord for idx, coord in enumerate(snake_case__ ) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format lowercase_ = [] for x, y, w, h in zip(snake_case__ , snake_case__ , snake_case__ , snake_case__ ): lowercase_ = [x, y, x + w, y + h] actual_boxes.append(snake_case__ ) # finally, normalize the bounding boxes lowercase_ = [] for box in actual_boxes: normalized_boxes.append(normalize_box(snake_case__ , snake_case__ , snake_case__ ) ) assert len(snake_case__ ) == len(snake_case__ ), "Not as many words as there are bounding boxes" return words, normalized_boxes class lowercase__( UpperCAmelCase ): """simple docstring""" a :Optional[Any] = ['pixel_values'] def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Dict[str, int] = None , SCREAMING_SNAKE_CASE_ : PILImageResampling = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Optional[str] = None , SCREAMING_SNAKE_CASE_ : Optional[str] = "" , **SCREAMING_SNAKE_CASE_ : int , ) -> None: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = size if size is not None else {'''height''': 2_2_4, '''width''': 2_2_4} lowercase_ = get_size_dict(SCREAMING_SNAKE_CASE_ ) lowercase_ = do_resize lowercase_ = size lowercase_ = resample lowercase_ = apply_ocr lowercase_ = ocr_lang lowercase_ = tesseract_config def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Dict[str, int] , SCREAMING_SNAKE_CASE_ : PILImageResampling = PILImageResampling.BILINEAR , 
SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : Dict , ) -> np.ndarray: lowercase_ = get_size_dict(SCREAMING_SNAKE_CASE_ ) if "height" not in size or "width" not in size: raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' ) lowercase_ = (size['''height'''], size['''width''']) return resize(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : ImageInput , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : Dict[str, int] = None , SCREAMING_SNAKE_CASE_ : PILImageResampling = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : Optional[str] = None , SCREAMING_SNAKE_CASE_ : Optional[str] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE_ : ChannelDimension = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ : Dict , ) -> PIL.Image.Image: lowercase_ = do_resize if do_resize is not None else self.do_resize lowercase_ = size if size is not None else self.size lowercase_ = get_size_dict(SCREAMING_SNAKE_CASE_ ) lowercase_ = resample if resample is not None else self.resample lowercase_ = apply_ocr if apply_ocr is not None else self.apply_ocr lowercase_ = ocr_lang if ocr_lang is not None else self.ocr_lang lowercase_ = tesseract_config if tesseract_config is not None else self.tesseract_config lowercase_ = make_list_of_images(SCREAMING_SNAKE_CASE_ ) if not valid_images(SCREAMING_SNAKE_CASE_ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) # All transformations expect numpy arrays. lowercase_ = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images] if apply_ocr: requires_backends(self , '''pytesseract''' ) lowercase_ = [] lowercase_ = [] for image in images: lowercase_ , lowercase_ = apply_tesseract(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) words_batch.append(SCREAMING_SNAKE_CASE_ ) boxes_batch.append(SCREAMING_SNAKE_CASE_ ) if do_resize: lowercase_ = [self.resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ ) for image in images] # flip color channels from RGB to BGR (as Detectron2 requires this) lowercase_ = [flip_channel_order(SCREAMING_SNAKE_CASE_ ) for image in images] lowercase_ = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images] lowercase_ = BatchFeature(data={'''pixel_values''': images} , tensor_type=SCREAMING_SNAKE_CASE_ ) if apply_ocr: lowercase_ = words_batch lowercase_ = boxes_batch return data
from __future__ import annotations


def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Swap the two elements if they violate ``direction`` (1 ascending, 0 descending)."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Merge a bitonic sequence of ``length`` elements starting at ``low`` into sorted order."""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sort ``length`` elements of ``array`` starting at ``low``; direction 1 is ascending."""
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print("\nSorted array in ascending order is: ", end="")
    print(*unsorted, sep=", ")

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print("Sorted array in descending order is: ", end="")
    print(*unsorted, sep=", ")
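# A small non-interactive example for the functions above (not in the original
# script, which reads its input from stdin). Note that classic bitonic sort
# assumes the input length is a power of two.
data = [12, 42, -21, 1]
bitonic_sort(data, 0, len(data), 1)
print(data)  # [-21, 1, 12, 42]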
from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import KarrasVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class lowercase__( UpperCAmelCase ): """simple docstring""" a :UNetaDModel a :KarrasVeScheduler def __init__( self : int , SCREAMING_SNAKE_CASE_ : UNetaDModel , SCREAMING_SNAKE_CASE_ : KarrasVeScheduler ) -> List[str]: super().__init__() self.register_modules(unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ ) @torch.no_grad() def __call__( self : List[str] , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : int = 5_0 , SCREAMING_SNAKE_CASE_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , SCREAMING_SNAKE_CASE_ : Optional[str] = "pil" , SCREAMING_SNAKE_CASE_ : bool = True , **SCREAMING_SNAKE_CASE_ : int , ) -> Union[Tuple, ImagePipelineOutput]: lowercase_ = self.unet.config.sample_size lowercase_ = (batch_size, 3, img_size, img_size) lowercase_ = self.unet # sample x_0 ~ N(0, sigma_0^2 * I) lowercase_ = randn_tensor(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device ) * self.scheduler.init_noise_sigma self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ ) for t in self.progress_bar(self.scheduler.timesteps ): # here sigma_t == t_i from the paper lowercase_ = self.scheduler.schedule[t] lowercase_ = self.scheduler.schedule[t - 1] if t > 0 else 0 # 1. Select temporarily increased noise level sigma_hat # 2. Add new noise to move from sample_i to sample_hat lowercase_ , lowercase_ = self.scheduler.add_noise_to_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ ) # 3. Predict the noise residual given the noise magnitude `sigma_hat` # The model inputs and output are adjusted by following eq. (213) in [1]. lowercase_ = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample # 4. Evaluate dx/dt at sigma_hat # 5. Take Euler step from sigma to sigma_prev lowercase_ = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if sigma_prev != 0: # 6. Apply 2nd order correction # The model inputs and output are adjusted by following eq. (213) in [1]. lowercase_ = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample lowercase_ = self.scheduler.step_correct( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , step_output.prev_sample , step_output['''derivative'''] , ) lowercase_ = step_output.prev_sample lowercase_ = (sample / 2 + 0.5).clamp(0 , 1 ) lowercase_ = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowercase_ = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE_ )
from __future__ import annotations

from collections.abc import MutableSequence


class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )
        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
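# Illustrative usage of the Polynomial class above (not part of the original file);
# it relies on the reconstructed method names `evaluate` and `derivative`.
p = Polynomial(2, [1, 0, 3])  # 3x^2 + 1
q = Polynomial(1, [0, 2])     # 2x
print(p + q)                  # 3x^2 + 2x + 1
print(p.evaluate(2))          # 3 * 4 + 1 = 13
print(p.derivative())         # 6x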
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.local_sgd import LocalSGD ######################################################################## # This is a fully working simple example to use Accelerate # with LocalSGD, which is a method to synchronize model # parameters every K batches. It is different, but complementary # to gradient accumulation. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __a = 1_6 __a = 3_2 def a ( snake_case__: Accelerator , snake_case__: int = 16 ): '''simple docstring''' lowercase_ = AutoTokenizer.from_pretrained('''bert-base-cased''' ) lowercase_ = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(snake_case__: int ): # max_length=None => use the model max length (it's actually the default) lowercase_ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=snake_case__ , max_length=snake_case__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowercase_ = datasets.map( snake_case__ , batched=snake_case__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowercase_ = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(snake_case__: Dict ): # On TPU it's best to pad everything to the same length or training will be very slow. lowercase_ = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowercase_ = 16 elif accelerator.mixed_precision != "no": lowercase_ = 8 else: lowercase_ = None return tokenizer.pad( snake_case__ , padding='''longest''' , max_length=snake_case__ , pad_to_multiple_of=snake_case__ , return_tensors='''pt''' , ) # Instantiate dataloaders. 
lowercase_ = DataLoader( tokenized_datasets['''train'''] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ ) lowercase_ = DataLoader( tokenized_datasets['''validation'''] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1": from accelerate.test_utils.training import mocked_dataloaders __a = mocked_dataloaders # noqa: F811 def a ( snake_case__: int , snake_case__: Tuple ): '''simple docstring''' # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , snake_case__ ) == "1": lowercase_ = 2 # New Code # lowercase_ = int(args.gradient_accumulation_steps ) lowercase_ = int(args.local_sgd_steps ) # Initialize accelerator lowercase_ = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=snake_case__ ) if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]: raise NotImplementedError('''LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)''' ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowercase_ = config['''lr'''] lowercase_ = int(config['''num_epochs'''] ) lowercase_ = int(config['''seed'''] ) lowercase_ = int(config['''batch_size'''] ) lowercase_ = evaluate.load('''glue''' , '''mrpc''' ) set_seed(snake_case__ ) lowercase_ , lowercase_ = get_dataloaders(snake_case__ , snake_case__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowercase_ = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=snake_case__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowercase_ = model.to(accelerator.device ) # Instantiate optimizer lowercase_ = AdamW(params=model.parameters() , lr=snake_case__ ) # Instantiate scheduler lowercase_ = get_linear_schedule_with_warmup( optimizer=snake_case__ , num_warmup_steps=100 , num_training_steps=(len(snake_case__ ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = accelerator.prepare( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # Now we train the model for epoch in range(snake_case__ ): model.train() with LocalSGD( accelerator=snake_case__ , model=snake_case__ , local_sgd_steps=snake_case__ , enabled=local_sgd_steps is not None ) as local_sgd: for step, batch in enumerate(snake_case__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. 
with accelerator.accumulate(snake_case__ ): lowercase_ = model(**snake_case__ ) lowercase_ = output.loss accelerator.backward(snake_case__ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # LocalSGD-specific line local_sgd.step() model.eval() for step, batch in enumerate(snake_case__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): lowercase_ = model(**snake_case__ ) lowercase_ = outputs.logits.argmax(dim=-1 ) lowercase_ , lowercase_ = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=snake_case__ , references=snake_case__ , ) lowercase_ = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' , snake_case__ ) def a ( ): '''simple docstring''' lowercase_ = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=snake_case__ , default=snake_case__ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) # New Code # parser.add_argument( '''--gradient_accumulation_steps''' , type=snake_case__ , default=1 , help='''The number of minibatches to be ran before gradients are accumulated.''' , ) parser.add_argument( '''--local_sgd_steps''' , type=snake_case__ , default=8 , help='''Number of local SGD steps or None to disable local SGD''' ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) lowercase_ = parser.parse_args() lowercase_ = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(snake_case__ , snake_case__ ) if __name__ == "__main__": main()
import itertools
import math


def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime, using 6k +/- 1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All remaining primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield prime numbers in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    """Project Euler 7: return the ``nth`` prime number."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
class lowercase__: """simple docstring""" def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int ) -> None: lowercase_ = size lowercase_ = [0] * size lowercase_ = [0] * size @staticmethod def _lowercase ( SCREAMING_SNAKE_CASE_ : int ) -> int: return index | (index + 1) @staticmethod def _lowercase ( SCREAMING_SNAKE_CASE_ : int ) -> int: return (index & (index + 1)) - 1 def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> None: lowercase_ = value while index < self.size: lowercase_ = self.get_prev(SCREAMING_SNAKE_CASE_ ) + 1 if current_left_border == index: lowercase_ = value else: lowercase_ = max(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowercase_ = self.get_next(SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> int: right -= 1 # Because of right is exclusive lowercase_ = 0 while left <= right: lowercase_ = self.get_prev(SCREAMING_SNAKE_CASE_ ) if left <= current_left: lowercase_ = max(SCREAMING_SNAKE_CASE_ , self.tree[right] ) lowercase_ = current_left else: lowercase_ = max(SCREAMING_SNAKE_CASE_ , self.arr[right] ) right -= 1 return result if __name__ == "__main__": import doctest doctest.testmod()
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
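# A minimal usage sketch for the re-exported UnCLIPPipeline (not part of this
# __init__ module). The checkpoint id "kakaobrain/karlo-v1-alpha" is an assumption;
# substitute whatever unCLIP weights you actually use.
import torch
from diffusers import UnCLIPPipeline

pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha", torch_dtype=torch.float16)
pipe = pipe.to("cuda")
image = pipe("a photo of a corgi wearing a party hat").images[0]
image.save("corgi.png")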
import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 __a = get_tests_dir('fixtures') __a = get_tests_dir('fixtures/dummy_feature_extractor_config.json') __a = get_tests_dir('fixtures/dummy-config.json') class lowercase__( unittest.TestCase ): """simple docstring""" def _lowercase ( self : Optional[int] ) -> Any: lowercase_ = 0 def _lowercase ( self : List[Any] ) -> Union[str, Any]: lowercase_ = AutoFeatureExtractor.from_pretrained('''facebook/wav2vec2-base-960h''' ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Tuple ) -> Any: lowercase_ = AutoFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Any ) -> Optional[Any]: with tempfile.TemporaryDirectory() as tmpdirname: lowercase_ = WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally lowercase_ = AutoFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ ).to_dict() config_dict.pop('''feature_extractor_type''' ) lowercase_ = WavaVecaFeatureExtractor(**SCREAMING_SNAKE_CASE_ ) # save in new folder model_config.save_pretrained(SCREAMING_SNAKE_CASE_ ) config.save_pretrained(SCREAMING_SNAKE_CASE_ ) lowercase_ = AutoFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ ) # make sure private variable is not incorrectly saved lowercase_ = json.loads(config.to_json_string() ) self.assertTrue('''_processor_class''' not in dict_as_saved ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Union[str, Any] ) -> Tuple: lowercase_ = AutoFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Optional[Any] ) -> List[Any]: with self.assertRaisesRegex( SCREAMING_SNAKE_CASE_ , '''bert-base is not a local folder and is not a valid model identifier''' ): lowercase_ = AutoFeatureExtractor.from_pretrained('''bert-base''' ) def _lowercase ( self : Optional[Any] ) -> str: with self.assertRaisesRegex( SCREAMING_SNAKE_CASE_ , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): lowercase_ = AutoFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ , revision='''aaaaaa''' ) def _lowercase ( self : int ) -> Optional[int]: with self.assertRaisesRegex( SCREAMING_SNAKE_CASE_ , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ): lowercase_ = AutoFeatureExtractor.from_pretrained('''hf-internal-testing/config-no-model''' ) def _lowercase ( self : Tuple ) -> Tuple: # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(SCREAMING_SNAKE_CASE_ ): lowercase_ = AutoFeatureExtractor.from_pretrained( '''hf-internal-testing/test_dynamic_feature_extractor''' ) # If remote code is disabled, we can't load this config. 
with self.assertRaises(SCREAMING_SNAKE_CASE_ ): lowercase_ = AutoFeatureExtractor.from_pretrained( '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=SCREAMING_SNAKE_CASE_ ) lowercase_ = AutoFeatureExtractor.from_pretrained( '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=SCREAMING_SNAKE_CASE_ ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) # Test feature extractor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE_ ) lowercase_ = AutoFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ , trust_remote_code=SCREAMING_SNAKE_CASE_ ) self.assertEqual(reloaded_feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) def _lowercase ( self : List[Any] ) -> Tuple: try: AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE_ ) AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(SCREAMING_SNAKE_CASE_ ): AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Now that the config is registered, it can be used as any other config with the auto-API lowercase_ = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ ) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE_ ) lowercase_ = AutoFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def _lowercase ( self : Any ) -> Dict: class lowercase__( UpperCAmelCase ): """simple docstring""" a :Optional[int] = True try: AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE_ ) AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # If remote code is not set, the default is to use local lowercase_ = AutoFeatureExtractor.from_pretrained( '''hf-internal-testing/test_dynamic_feature_extractor''' ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) self.assertTrue(feature_extractor.is_local ) # If remote code is disabled, we load the local one. lowercase_ = AutoFeatureExtractor.from_pretrained( '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=SCREAMING_SNAKE_CASE_ ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) self.assertTrue(feature_extractor.is_local ) # If remote is enabled, we load from the Hub lowercase_ = AutoFeatureExtractor.from_pretrained( '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=SCREAMING_SNAKE_CASE_ ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) self.assertTrue(not hasattr(SCREAMING_SNAKE_CASE_ , '''is_local''' ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
30
from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS __a = logging.get_logger(__name__) __a = { 'linear': get_linear_schedule_with_warmup, 'cosine': get_cosine_schedule_with_warmup, 'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup, 'polynomial': get_polynomial_decay_schedule_with_warmup, 'constant': get_constant_schedule, 'constant_w_warmup': get_constant_schedule_with_warmup, } class lowercase__( UpperCAmelCase ): """simple docstring""" def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> List[str]: super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) if config is None: assert isinstance(self.model , SCREAMING_SNAKE_CASE_ ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" f''' {self.model.__class__}''' ) lowercase_ = self.model.config else: lowercase_ = config lowercase_ = data_args lowercase_ = self.config.tgt_vocab_size if isinstance(self.config , SCREAMING_SNAKE_CASE_ ) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( f'''The `config.pad_token_id` is `None`. 
Using `config.eos_token_id` = {self.config.eos_token_id} for''' ''' padding..''' ) if self.args.label_smoothing == 0: lowercase_ = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss lowercase_ = label_smoothed_nll_loss def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : int ) -> Optional[Any]: if self.optimizer is None: lowercase_ = ['''bias''', '''LayerNorm.weight'''] lowercase_ = [ { '''params''': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], '''weight_decay''': self.args.weight_decay, }, { '''params''': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0, }, ] lowercase_ = Adafactor if self.args.adafactor else AdamW if self.args.adafactor: lowercase_ = Adafactor lowercase_ = {'''scale_parameter''': False, '''relative_step''': False} else: lowercase_ = AdamW lowercase_ = { '''betas''': (self.args.adam_betaa, self.args.adam_betaa), '''eps''': self.args.adam_epsilon, } lowercase_ = self.args.learning_rate if self.sharded_ddp: lowercase_ = OSS( params=SCREAMING_SNAKE_CASE_ , optim=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) else: lowercase_ = optimizer_cls(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) if self.lr_scheduler is None: lowercase_ = self._get_lr_scheduler(SCREAMING_SNAKE_CASE_ ) else: # ignoring --lr_scheduler logger.warning('''scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.''' ) def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Dict ) -> Dict: lowercase_ = arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": lowercase_ = schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": lowercase_ = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps ) else: lowercase_ = schedule_func( self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=SCREAMING_SNAKE_CASE_ ) return scheduler def _lowercase ( self : Tuple ) -> Optional[torch.utils.data.Sampler]: if isinstance(self.train_dataset , torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[Any] ) -> Any: if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token lowercase_ = model(**SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )[0] lowercase_ = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) ) else: # compute usual loss via models lowercase_ , lowercase_ = model(**SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )[:2] else: # compute label smoothed loss lowercase_ = model(**SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )[0] lowercase_ = torch.nn.functional.log_softmax(SCREAMING_SNAKE_CASE_ , dim=-1 ) lowercase_ , lowercase_ = self.loss_fn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.args.label_smoothing , 
ignore_index=self.config.pad_token_id ) return loss, logits def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> List[Any]: lowercase_ = inputs.pop('''labels''' ) lowercase_ , lowercase_ = self._compute_loss(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return loss def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : nn.Module , SCREAMING_SNAKE_CASE_ : Dict[str, Union[torch.Tensor, Any]] , SCREAMING_SNAKE_CASE_ : bool , SCREAMING_SNAKE_CASE_ : Optional[List[str]] = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: lowercase_ = self._prepare_inputs(SCREAMING_SNAKE_CASE_ ) lowercase_ = { '''max_length''': self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, '''num_beams''': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: lowercase_ = self.model.generate( inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , **SCREAMING_SNAKE_CASE_ , ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: lowercase_ = self._pad_tensors_to_max_len(SCREAMING_SNAKE_CASE_ , gen_kwargs['''max_length'''] ) lowercase_ = inputs.pop('''labels''' ) with torch.no_grad(): # compute loss on predict data lowercase_ , lowercase_ = self._compute_loss(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowercase_ = loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) lowercase_ = generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: lowercase_ = self._pad_tensors_to_max_len(SCREAMING_SNAKE_CASE_ , gen_kwargs['''max_length'''] ) return (loss, logits, labels) def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int ) -> Tuple: # If PAD token is not defined at least EOS token has to be defined lowercase_ = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( '''Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be''' f''' padded to `max_length`={max_length}''' ) lowercase_ = pad_token_id * torch.ones( (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device ) lowercase_ = tensor return padded_tensor
30
1
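The trainer code in the record above pads generated token tensors up to a fixed length with the pad (or EOS) token before metrics are computed. A standalone sketch of that padding step; the function name and example values here are invented for illustration:

import torch

def pad_to_max_len(tensor: torch.Tensor, max_length: int, pad_token_id: int) -> torch.Tensor:
    # Allocate a (batch, max_length) tensor filled with the pad id, then copy the real tokens in.
    padded = pad_token_id * torch.ones(
        (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
    )
    padded[:, : tensor.shape[-1]] = tensor
    return padded

generated = torch.tensor([[5, 6, 7], [8, 9, 10]])
print(pad_to_max_len(generated, max_length=5, pad_token_id=0))
# tensor([[ 5,  6,  7,  0,  0],
#         [ 8,  9, 10,  0,  0]])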
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __a = { 'configuration_xlm': ['XLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMConfig', 'XLMOnnxConfig'], 'tokenization_xlm': ['XLMTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'XLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'XLMForMultipleChoice', 'XLMForQuestionAnswering', 'XLMForQuestionAnsweringSimple', 'XLMForSequenceClassification', 'XLMForTokenClassification', 'XLMModel', 'XLMPreTrainedModel', 'XLMWithLMHeadModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFXLMForMultipleChoice', 'TFXLMForQuestionAnsweringSimple', 'TFXLMForSequenceClassification', 'TFXLMForTokenClassification', 'TFXLMMainLayer', 'TFXLMModel', 'TFXLMPreTrainedModel', 'TFXLMWithLMHeadModel', ] if TYPE_CHECKING: from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig from .tokenization_xlm import XLMTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm import ( XLM_PRETRAINED_MODEL_ARCHIVE_LIST, XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMPreTrainedModel, XLMWithLMHeadModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm import ( TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMForMultipleChoice, TFXLMForQuestionAnsweringSimple, TFXLMForSequenceClassification, TFXLMForTokenClassification, TFXLMMainLayer, TFXLMModel, TFXLMPreTrainedModel, TFXLMWithLMHeadModel, ) else: import sys __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
30
import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class lowercase__( unittest.TestCase ): """simple docstring""" def _lowercase ( self : List[str] ) -> List[Any]: lowercase_ = 1_0 def _lowercase ( self : int ) -> List[str]: lowercase_ = [1, 2, 3, 4] lowercase_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(SCREAMING_SNAKE_CASE_ , self.block_size , 0 ) , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : int ) -> Optional[Any]: lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] self.assertEqual(truncate_or_pad(SCREAMING_SNAKE_CASE_ , self.block_size , 0 ) , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Union[str, Any] ) -> Optional[int]: lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0, 1_1, 1_2, 1_3] lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] self.assertEqual(truncate_or_pad(SCREAMING_SNAKE_CASE_ , self.block_size , 0 ) , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Any ) -> List[Any]: lowercase_ = '''It was the year of Our Lord one thousand seven hundred and seventy-five.\n\nSpiritual revelations were conceded to England at that favoured period, as at this.''' lowercase_ , lowercase_ = process_story(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , [] ) def _lowercase ( self : List[str] ) -> List[str]: lowercase_ = '''''' lowercase_ , lowercase_ = process_story(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , [] ) self.assertEqual(SCREAMING_SNAKE_CASE_ , [] ) def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]: lowercase_ = ( '''It was the year of Our Lord one thousand seven hundred and ''' '''seventy-five\n\nSpiritual revelations were conceded to England ''' '''at that favoured period, as at this.\n@highlight\n\nIt was the best of times''' ) lowercase_ , lowercase_ = process_story(SCREAMING_SNAKE_CASE_ ) lowercase_ = [ '''It was the year of Our Lord one thousand seven hundred and seventy-five.''', '''Spiritual revelations were conceded to England at that favoured period, as at this.''', ] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowercase_ = ['''It was the best of times.'''] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Union[str, Any] ) -> Optional[Any]: lowercase_ = torch.tensor([1, 2, 3, 4] ) lowercase_ = torch.tensor([1, 1, 1, 1] ) np.testing.assert_array_equal(build_mask(SCREAMING_SNAKE_CASE_ , 0 ).numpy() , expected.numpy() ) def _lowercase ( self : List[Any] ) -> Tuple: lowercase_ = torch.tensor([1, 2, 3, 4, 2_3, 2_3, 2_3] ) lowercase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(SCREAMING_SNAKE_CASE_ , 2_3 ).numpy() , expected.numpy() ) def _lowercase ( self : int ) -> Dict: lowercase_ = torch.tensor([8, 2, 3, 4, 1, 1, 1] ) lowercase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(SCREAMING_SNAKE_CASE_ , 1 ).numpy() , expected.numpy() ) def _lowercase ( self : List[str] ) -> Tuple: lowercase_ = 1_0_1 lowercase_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_0_1, 5, 6], [1, 1_0_1, 3, 4, 1_0_1, 6]] ) lowercase_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] ) lowercase_ = compute_token_type_ids(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) np.testing.assert_array_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
30
1
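The tests in the record above pin down the contract of truncate_or_pad: clip a sequence to block_size, or right-pad it with the pad id. The real helper lives in utils_summarization; this is only a minimal sketch consistent with the tested behaviour:

def truncate_or_pad(sequence: list, block_size: int, pad_token_id: int) -> list:
    # Clip long sequences; right-pad short ones so every example has length block_size.
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))

print(truncate_or_pad([1, 2, 3, 4], 10, 0))        # [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
print(truncate_or_pad(list(range(1, 14)), 10, 0))  # [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]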
import math import time from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class lowercase__( UpperCAmelCase ): """simple docstring""" def __init__( self : str , *SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict=None , SCREAMING_SNAKE_CASE_ : Optional[int]=None , **SCREAMING_SNAKE_CASE_ : str ) -> int: super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) lowercase_ = eval_examples lowercase_ = post_process_function def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : str=None , SCREAMING_SNAKE_CASE_ : int=None , SCREAMING_SNAKE_CASE_ : str = "eval" ) -> Union[str, Any]: lowercase_ = self.eval_dataset if eval_dataset is None else eval_dataset lowercase_ = self.get_eval_dataloader(SCREAMING_SNAKE_CASE_ ) lowercase_ = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. lowercase_ = self.compute_metrics lowercase_ = None lowercase_ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop lowercase_ = time.time() try: lowercase_ = eval_loop( SCREAMING_SNAKE_CASE_ , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=SCREAMING_SNAKE_CASE_ , metric_key_prefix=SCREAMING_SNAKE_CASE_ , ) finally: lowercase_ = compute_metrics lowercase_ = self.args.eval_batch_size * self.args.world_size if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default lowercase_ = self.post_process_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , output.predictions ) lowercase_ = self.compute_metrics(SCREAMING_SNAKE_CASE_ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f'''{metric_key_prefix}_''' ): lowercase_ = metrics.pop(SCREAMING_SNAKE_CASE_ ) metrics.update(output.metrics ) else: lowercase_ = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(SCREAMING_SNAKE_CASE_ ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) lowercase_ = self.callback_handler.on_evaluate(self.args , self.state , self.control , SCREAMING_SNAKE_CASE_ ) return metrics def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int=None , SCREAMING_SNAKE_CASE_ : str = "test" ) -> Optional[Any]: lowercase_ = self.get_test_dataloader(SCREAMING_SNAKE_CASE_ ) # Temporarily disable metric computation, we will do it in the loop here. 
lowercase_ = self.compute_metrics lowercase_ = None lowercase_ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop lowercase_ = time.time() try: lowercase_ = eval_loop( SCREAMING_SNAKE_CASE_ , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=SCREAMING_SNAKE_CASE_ , metric_key_prefix=SCREAMING_SNAKE_CASE_ , ) finally: lowercase_ = compute_metrics lowercase_ = self.args.eval_batch_size * self.args.world_size if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output lowercase_ = self.post_process_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , output.predictions , '''predict''' ) lowercase_ = self.compute_metrics(SCREAMING_SNAKE_CASE_ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f'''{metric_key_prefix}_''' ): lowercase_ = metrics.pop(SCREAMING_SNAKE_CASE_ ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=SCREAMING_SNAKE_CASE_ )
30
def heaps(arr: list) -> list:
    """Return every permutation of `arr` (as tuples) using Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
    print(heaps(arr))
30
1
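A quick sanity check for the permutation helper above (illustrative only; it assumes the heaps() function from that snippet is in scope):

from itertools import permutations

result = heaps([1, 2, 3])
assert sorted(result) == sorted(permutations([1, 2, 3]))  # every ordering exactly once
print(len(result))  # 6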
import os import re from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __a = logging.get_logger(__name__) __a = { 'vocab_file': 'vocab.txt', 'merges_file': 'bpe.codes', } __a = { 'vocab_file': { 'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt', 'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt', }, 'merges_file': { 'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes', 'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes', }, } __a = { 'vinai/phobert-base': 2_5_6, 'vinai/phobert-large': 2_5_6, } def a ( snake_case__: List[str] ): '''simple docstring''' lowercase_ = set() lowercase_ = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowercase_ = char lowercase_ = set(snake_case__ ) return pairs class lowercase__( UpperCAmelCase ): """simple docstring""" a :Optional[Any] = VOCAB_FILES_NAMES a :List[str] = PRETRAINED_VOCAB_FILES_MAP a :Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any]="<s>" , SCREAMING_SNAKE_CASE_ : Union[str, Any]="</s>" , SCREAMING_SNAKE_CASE_ : List[Any]="</s>" , SCREAMING_SNAKE_CASE_ : Optional[Any]="<s>" , SCREAMING_SNAKE_CASE_ : List[str]="<unk>" , SCREAMING_SNAKE_CASE_ : List[Any]="<pad>" , SCREAMING_SNAKE_CASE_ : List[Any]="<mask>" , **SCREAMING_SNAKE_CASE_ : Optional[Any] , ) -> Union[str, Any]: super().__init__( bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) lowercase_ = vocab_file lowercase_ = merges_file lowercase_ = {} lowercase_ = 0 lowercase_ = 1 lowercase_ = 2 lowercase_ = 3 self.add_from_file(SCREAMING_SNAKE_CASE_ ) lowercase_ = {v: k for k, v in self.encoder.items()} with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as merges_handle: lowercase_ = merges_handle.read().split('''\n''' )[:-1] lowercase_ = [tuple(merge.split()[:-1] ) for merge in merges] lowercase_ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) ) lowercase_ = {} def _lowercase ( self : Any , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowercase_ = [self.cls_token_id] lowercase_ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE_ : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ ) if token_ids_a is None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1] def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]: lowercase_ = [self.sep_token_id] lowercase_ = [self.cls_token_id] if token_ids_a is 
None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _lowercase ( self : Any ) -> Any: return len(self.encoder ) def _lowercase ( self : Any ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : Any ) -> Any: if token in self.cache: return self.cache[token] lowercase_ = tuple(SCREAMING_SNAKE_CASE_ ) lowercase_ = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] ) lowercase_ = get_pairs(SCREAMING_SNAKE_CASE_ ) if not pairs: return token while True: lowercase_ = min(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE_ , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break lowercase_ , lowercase_ = bigram lowercase_ = [] lowercase_ = 0 while i < len(SCREAMING_SNAKE_CASE_ ): try: lowercase_ = word.index(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowercase_ = j if word[i] == first and i < len(SCREAMING_SNAKE_CASE_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowercase_ = tuple(SCREAMING_SNAKE_CASE_ ) lowercase_ = new_word if len(SCREAMING_SNAKE_CASE_ ) == 1: break else: lowercase_ = get_pairs(SCREAMING_SNAKE_CASE_ ) lowercase_ = '''@@ '''.join(SCREAMING_SNAKE_CASE_ ) lowercase_ = word[:-4] lowercase_ = word return word def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict ) -> int: lowercase_ = [] lowercase_ = re.findall(R'''\S+\n?''' , SCREAMING_SNAKE_CASE_ ) for token in words: split_tokens.extend(list(self.bpe(SCREAMING_SNAKE_CASE_ ).split(''' ''' ) ) ) return split_tokens def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : List[Any] ) -> Union[str, Any]: return self.encoder.get(SCREAMING_SNAKE_CASE_ , self.encoder.get(self.unk_token ) ) def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> List[Any]: return self.decoder.get(SCREAMING_SNAKE_CASE_ , self.unk_token ) def _lowercase ( self : Any , SCREAMING_SNAKE_CASE_ : Any ) -> Tuple: lowercase_ = ''' '''.join(SCREAMING_SNAKE_CASE_ ).replace('''@@ ''' , '''''' ).strip() return out_string def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(SCREAMING_SNAKE_CASE_ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return lowercase_ = os.path.join( SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) lowercase_ = os.path.join( SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_ ) if os.path.abspath(self.merges_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ): copyfile(self.merges_file , SCREAMING_SNAKE_CASE_ ) return out_vocab_file, out_merge_file def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : List[str] ) -> Dict: if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): try: with open(SCREAMING_SNAKE_CASE_ , '''r''' , encoding='''utf-8''' ) as fd: self.add_from_file(SCREAMING_SNAKE_CASE_ ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception(f'''Incorrect encoding detected in {f}, please rebuild the 
dataset''' ) return lowercase_ = f.readlines() for lineTmp in lines: lowercase_ = lineTmp.strip() lowercase_ = line.rfind(''' ''' ) if idx == -1: raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt>\'''' ) lowercase_ = line[:idx] lowercase_ = len(self.encoder )
30
import argparse import math import os import torch from neural_compressor.utils.pytorch import load from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel def a ( ): '''simple docstring''' lowercase_ = argparse.ArgumentParser() parser.add_argument( '''-m''' , '''--pretrained_model_name_or_path''' , type=snake_case__ , default=snake_case__ , required=snake_case__ , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , ) parser.add_argument( '''-c''' , '''--caption''' , type=snake_case__ , default='''robotic cat with wings''' , help='''Text used to generate images.''' , ) parser.add_argument( '''-n''' , '''--images_num''' , type=snake_case__ , default=4 , help='''How much images to generate.''' , ) parser.add_argument( '''-s''' , '''--seed''' , type=snake_case__ , default=42 , help='''Seed for random process.''' , ) parser.add_argument( '''-ci''' , '''--cuda_id''' , type=snake_case__ , default=0 , help='''cuda_id.''' , ) lowercase_ = parser.parse_args() return args def a ( snake_case__: Optional[Any] , snake_case__: Tuple , snake_case__: Union[str, Any] ): '''simple docstring''' if not len(snake_case__ ) == rows * cols: raise ValueError('''The specified number of rows and columns are not correct.''' ) lowercase_ , lowercase_ = imgs[0].size lowercase_ = Image.new('''RGB''' , size=(cols * w, rows * h) ) lowercase_ , lowercase_ = grid.size for i, img in enumerate(snake_case__ ): grid.paste(snake_case__ , box=(i % cols * w, i // cols * h) ) return grid def a ( snake_case__: Tuple , snake_case__: Union[str, Any]="robotic cat with wings" , snake_case__: Union[str, Any]=7.5 , snake_case__: List[str]=50 , snake_case__: List[Any]=1 , snake_case__: Optional[int]=42 , ): '''simple docstring''' lowercase_ = torch.Generator(pipeline.device ).manual_seed(snake_case__ ) lowercase_ = pipeline( snake_case__ , guidance_scale=snake_case__ , num_inference_steps=snake_case__ , generator=snake_case__ , num_images_per_prompt=snake_case__ , ).images lowercase_ = int(math.sqrt(snake_case__ ) ) lowercase_ = image_grid(snake_case__ , rows=_rows , cols=num_images_per_prompt // _rows ) return grid, images __a = parse_args() # Load models and create wrapper for stable diffusion __a = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer') __a = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder') __a = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae') __a = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet') __a = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer ) __a = lambda images, clip_input: (images, False) if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')): __a = load(args.pretrained_model_name_or_path, model=unet) unet.eval() setattr(pipeline, 'unet', unet) else: __a = unet.to(torch.device('cuda', args.cuda_id)) __a = pipeline.to(unet.device) __a , __a = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed) grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split())))) __a = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split())) os.makedirs(dirname, exist_ok=True) for idx, image in 
enumerate(images): image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
30
1
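The tokenizer in the record above applies BPE by repeatedly merging the adjacent symbol pair with the best (lowest) merge rank. A toy, self-contained illustration of one merge pass; the ranks here are invented, whereas the real tokenizer reads them from bpe.codes:

merge_ranks = {("l", "o"): 0, ("lo", "w"): 1}  # invented ranks for the example

def bpe_merge_once(symbols: list) -> list:
    pairs = list(zip(symbols, symbols[1:]))
    if not pairs:
        return symbols
    best = min(pairs, key=lambda p: merge_ranks.get(p, float("inf")))
    if best not in merge_ranks:
        return symbols  # no known pair left to merge
    merged, i = [], 0
    while i < len(symbols):
        if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == best:
            merged.append(symbols[i] + symbols[i + 1])
            i += 2
        else:
            merged.append(symbols[i])
            i += 1
    return merged

word = ["l", "o", "w"]
word = bpe_merge_once(word)  # ['lo', 'w']
word = bpe_merge_once(word)  # ['low']
print(word)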
import os import pytest from attr import dataclass __a = 'us-east-1' # defaults region @dataclass class lowercase__: """simple docstring""" a :str a :List[str] = 'arn:aws:iam::558105141721:role/sagemaker_execution_role' a :Optional[int] = { 'task_name': 'mnli', 'per_device_train_batch_size': 16, 'per_device_eval_batch_size': 16, 'do_train': True, 'do_eval': True, 'do_predict': True, 'output_dir': '/opt/ml/model', 'overwrite_output_dir': True, 'max_steps': 500, 'save_steps': 5_500, } a :Any = {**hyperparameters, 'max_steps': 1_000} @property def _lowercase ( self : Any ) -> str: if self.framework == "pytorch": return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"}, {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"}, ] else: return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"}, {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"}, ] @property def _lowercase ( self : Union[str, Any] ) -> str: return f'''{self.framework}-transfromers-test''' @property def _lowercase ( self : Optional[Any] ) -> str: return f'''./tests/sagemaker/scripts/{self.framework}''' @property def _lowercase ( self : List[str] ) -> str: if self.framework == "pytorch": return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04" else: return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04" @pytest.fixture(scope='''class''' ) def a ( snake_case__: Optional[int] ): '''simple docstring''' lowercase_ = SageMakerTestEnvironment(framework=request.cls.framework )
30
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __a = { 'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig'] } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['RemBertTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['RemBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'RemBertForCausalLM', 'RemBertForMaskedLM', 'RemBertForMultipleChoice', 'RemBertForQuestionAnswering', 'RemBertForSequenceClassification', 'RemBertForTokenClassification', 'RemBertLayer', 'RemBertModel', 'RemBertPreTrainedModel', 'load_tf_weights_in_rembert', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFRemBertForCausalLM', 'TFRemBertForMaskedLM', 'TFRemBertForMultipleChoice', 'TFRemBertForQuestionAnswering', 'TFRemBertForSequenceClassification', 'TFRemBertForTokenClassification', 'TFRemBertLayer', 'TFRemBertModel', 'TFRemBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_rembert import RemBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_rembert_fast import RemBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rembert import ( REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, RemBertForCausalLM, RemBertForMaskedLM, RemBertForMultipleChoice, RemBertForQuestionAnswering, RemBertForSequenceClassification, RemBertForTokenClassification, RemBertLayer, RemBertModel, RemBertPreTrainedModel, load_tf_weights_in_rembert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rembert import ( TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFRemBertForCausalLM, TFRemBertForMaskedLM, TFRemBertForMultipleChoice, TFRemBertForQuestionAnswering, TFRemBertForSequenceClassification, TFRemBertForTokenClassification, TFRemBertLayer, TFRemBertModel, TFRemBertPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
30
1
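The metric definitions in the record above tell SageMaker how to scrape metrics out of training logs with regular expressions. A standalone check of one of those patterns against an invented log line:

import re

log_line = "***** train metrics *****  train_runtime = 123.4"
pattern = r"train_runtime.*=\D*(.*?)$"  # same regex as in the metric definitions above
match = re.search(pattern, log_line)
print(match.group(1) if match else None)  # 123.4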
import itertools
import math


def is_prime(number: int) -> bool:
    """Trial-division primality test over 6k +/- 1 candidates."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the primes in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    """Return the nth prime number."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
30
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __a = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['DeiTFeatureExtractor'] __a = ['DeiTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'DeiTForImageClassification', 'DeiTForImageClassificationWithTeacher', 'DeiTForMaskedImageModeling', 'DeiTModel', 'DeiTPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFDeiTForImageClassification', 'TFDeiTForImageClassificationWithTeacher', 'TFDeiTForMaskedImageModeling', 'TFDeiTModel', 'TFDeiTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_deit import DeiTFeatureExtractor from .image_processing_deit import DeiTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deit import ( DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, DeiTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deit import ( TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, TFDeiTPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
30
1
import inspect import os import re from transformers.configuration_utils import PretrainedConfig from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py __a = 'src/transformers' # This is to make sure the transformers module imported is the one in the repo. __a = direct_transformers_import(PATH_TO_TRANSFORMERS) __a = transformers.models.auto.configuration_auto.CONFIG_MAPPING __a = { # used to compute the property `self.chunk_length` 'EncodecConfig': ['overlap'], # used as `self.bert_model = BertModel(config, ...)` 'DPRConfig': True, # not used in modeling files, but it's an important information 'FSMTConfig': ['langs'], # used internally in the configuration class file 'GPTNeoConfig': ['attention_types'], # used internally in the configuration class file 'EsmConfig': ['is_folding_model'], # used during training (despite we don't have training script for these models yet) 'Mask2FormerConfig': ['ignore_value'], # `ignore_value` used during training (despite we don't have training script for these models yet) # `norm` used in conversion script (despite not using in the modeling file) 'OneFormerConfig': ['ignore_value', 'norm'], # used during preprocessing and collation, see `collating_graphormer.py` 'GraphormerConfig': ['spatial_pos_max'], # used internally in the configuration class file 'T5Config': ['feed_forward_proj'], # used internally in the configuration class file # `tokenizer_class` get default value `T5Tokenizer` intentionally 'MT5Config': ['feed_forward_proj', 'tokenizer_class'], 'UMT5Config': ['feed_forward_proj', 'tokenizer_class'], # used internally in the configuration class file 'LongT5Config': ['feed_forward_proj'], # used internally in the configuration class file 'SwitchTransformersConfig': ['feed_forward_proj'], # having default values other than `1e-5` - we can't fix them without breaking 'BioGptConfig': ['layer_norm_eps'], # having default values other than `1e-5` - we can't fix them without breaking 'GLPNConfig': ['layer_norm_eps'], # having default values other than `1e-5` - we can't fix them without breaking 'SegformerConfig': ['layer_norm_eps'], # having default values other than `1e-5` - we can't fix them without breaking 'CvtConfig': ['layer_norm_eps'], # having default values other than `1e-5` - we can't fix them without breaking 'PerceiverConfig': ['layer_norm_eps'], # used internally to calculate the feature size 'InformerConfig': ['num_static_real_features', 'num_time_features'], # used internally to calculate the feature size 'TimeSeriesTransformerConfig': ['num_static_real_features', 'num_time_features'], # used internally to calculate the feature size 'AutoformerConfig': ['num_static_real_features', 'num_time_features'], # used internally to calculate `mlp_dim` 'SamVisionConfig': ['mlp_ratio'], # For (head) training, but so far not implemented 'ClapAudioConfig': ['num_classes'], # Not used, but providing useful information to users 'SpeechT5HifiGanConfig': ['sampling_rate'], } # TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure SPECIAL_CASES_TO_ALLOW.update( { 'CLIPSegConfig': True, 'DeformableDetrConfig': True, 'DetaConfig': True, 'DinatConfig': True, 'DonutSwinConfig': True, 'EfficientFormerConfig': True, 'FSMTConfig': True, 'JukeboxConfig': True, 'LayoutLMv2Config': True, 'MaskFormerSwinConfig': True, 'MT5Config': True, 'NatConfig': True, 
'OneFormerConfig': True, 'PerceiverConfig': True, 'RagConfig': True, 'SpeechT5Config': True, 'SwinConfig': True, 'Swin2SRConfig': True, 'Swinv2Config': True, 'SwitchTransformersConfig': True, 'TableTransformerConfig': True, 'TapasConfig': True, 'TransfoXLConfig': True, 'UniSpeechConfig': True, 'UniSpeechSatConfig': True, 'WavLMConfig': True, 'WhisperConfig': True, # TODO: @Arthur (for `alignment_head` and `alignment_layer`) 'JukeboxPriorConfig': True, # TODO: @Younes (for `is_decoder`) 'Pix2StructTextConfig': True, } ) def a ( snake_case__: Dict , snake_case__: Optional[Any] , snake_case__: Tuple , snake_case__: Tuple ): '''simple docstring''' lowercase_ = False for attribute in attributes: for modeling_source in source_strings: # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)` if ( F'''config.{attribute}''' in modeling_source or F'''getattr(config, "{attribute}"''' in modeling_source or F'''getattr(self.config, "{attribute}"''' in modeling_source ): lowercase_ = True # Deal with multi-line cases elif ( re.search( rF'''getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"''' , snake_case__ , ) is not None ): lowercase_ = True # `SequenceSummary` is called with `SequenceSummary(config)` elif attribute in [ "summary_type", "summary_use_proj", "summary_activation", "summary_last_dropout", "summary_proj_to_labels", "summary_first_dropout", ]: if "SequenceSummary" in modeling_source: lowercase_ = True if attribute_used: break if attribute_used: break # common and important attributes, even if they do not always appear in the modeling files lowercase_ = [ '''bos_index''', '''eos_index''', '''pad_index''', '''unk_index''', '''mask_index''', '''image_size''', '''use_cache''', '''out_features''', '''out_indices''', ] lowercase_ = ['''encoder_no_repeat_ngram_size'''] # Special cases to be allowed lowercase_ = True if not attribute_used: lowercase_ = False for attribute in attributes: # Allow if the default value in the configuration class is different from the one in `PretrainedConfig` if attribute in ["is_encoder_decoder"] and default_value is True: lowercase_ = True elif attribute in ["tie_word_embeddings"] and default_value is False: lowercase_ = True # Allow cases without checking the default value in the configuration class elif attribute in attributes_to_allow + attributes_used_in_generation: lowercase_ = True elif attribute.endswith('''_token_id''' ): lowercase_ = True # configuration class specific cases if not case_allowed: lowercase_ = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] ) lowercase_ = allowed_cases is True or attribute in allowed_cases return attribute_used or case_allowed def a ( snake_case__: Dict ): '''simple docstring''' lowercase_ = dict(inspect.signature(config_class.__init__ ).parameters ) lowercase_ = [x for x in list(signature.keys() ) if x not in ['''self''', '''kwargs''']] lowercase_ = [signature[param].default for param in parameter_names] # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long # as one variant is used, the test should pass lowercase_ = {} if len(config_class.attribute_map ) > 0: lowercase_ = {v: k for k, v in config_class.attribute_map.items()} # Get the path to modeling source files lowercase_ = inspect.getsourcefile(snake_case__ ) lowercase_ = os.path.dirname(snake_case__ ) # Let's check against all frameworks: as long as one framework uses an attribute, we are good. 
lowercase_ = [os.path.join(snake_case__ , snake_case__ ) for fn in os.listdir(snake_case__ ) if fn.startswith('''modeling_''' )] # Get the source code strings lowercase_ = [] for path in modeling_paths: if os.path.isfile(snake_case__ ): with open(snake_case__ ) as fp: modeling_sources.append(fp.read() ) lowercase_ = [] for config_param, default_value in zip(snake_case__ , snake_case__ ): # `attributes` here is all the variant names for `config_param` lowercase_ = [config_param] # some configuration classes have non-empty `attribute_map`, and both names could be used in the # corresponding modeling files. As long as one of them appears, it is fine. if config_param in reversed_attribute_map: attributes.append(reversed_attribute_map[config_param] ) if not check_attribute_being_used(snake_case__ , snake_case__ , snake_case__ , snake_case__ ): unused_attributes.append(attributes[0] ) return sorted(snake_case__ ) def a ( ): '''simple docstring''' lowercase_ = {} for _config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in _config_class.__module__: continue # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.) lowercase_ = [ cls for name, cls in inspect.getmembers( inspect.getmodule(_config_class ) , lambda snake_case__ : inspect.isclass(snake_case__ ) and issubclass(snake_case__ , snake_case__ ) and inspect.getmodule(snake_case__ ) == inspect.getmodule(_config_class ) , ) ] for config_class in config_classes_in_module: lowercase_ = check_config_attributes_being_used(snake_case__ ) if len(snake_case__ ) > 0: lowercase_ = unused_attributes if len(snake_case__ ) > 0: lowercase_ = '''The following configuration classes contain unused attributes in the corresponding modeling files:\n''' for name, attributes in configs_with_unused_attributes.items(): error += F'''{name}: {attributes}\n''' raise ValueError(snake_case__ ) if __name__ == "__main__": check_config_attributes()
30
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) __a = { 'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'], 'processing_trocr': ['TrOCRProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST', 'TrOCRForCausalLM', 'TrOCRPreTrainedModel', ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
30
1
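The checker script in the record above decides whether a configuration attribute is "used" by scanning modeling source strings for `config.xxx` or `getattr(config, "xxx", ...)` patterns. A small standalone illustration of that kind of scan; the attribute names and the source text here are made up:

import re

source = 'hidden = getattr(self.config, "layer_norm_eps", 1e-5) + config.hidden_size'

def attribute_used(attribute: str, source: str) -> bool:
    # Same idea as the checker: a direct access or a getattr() lookup counts as "used".
    if f"config.{attribute}" in source or f'getattr(config, "{attribute}"' in source:
        return True
    return re.search(rf'getattr\(\s*(self\.)?config,\s*"{attribute}"', source) is not None

print(attribute_used("hidden_size", source))     # True  (direct `config.` access)
print(attribute_used("layer_norm_eps", source))  # True  (getattr on self.config)
print(attribute_used("vocab_size", source))      # False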
def perfect_cube(n: int) -> bool:
    """Return True if `n` is a perfect cube (integer comparison avoids float rounding error)."""
    val = round(n ** (1 / 3))
    return val * val * val == n


if __name__ == "__main__":
    print(perfect_cube(27))  # True
    print(perfect_cube(4))   # False
30
from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig __a = logging.get_logger(__name__) # General docstring __a = 'RegNetConfig' # Base docstring __a = 'facebook/regnet-y-040' __a = [1, 1_0_8_8, 7, 7] # Image classification docstring __a = 'facebook/regnet-y-040' __a = 'tabby, tabby cat' __a = [ 'facebook/regnet-y-040', # See all regnet models at https://huggingface.co/models?filter=regnet ] class lowercase__( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 3 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : Optional[str] = "relu" , **SCREAMING_SNAKE_CASE_ : Any , ) -> List[str]: super().__init__(**SCREAMING_SNAKE_CASE_ ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb lowercase_ = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) lowercase_ = tf.keras.layers.ConvaD( filters=SCREAMING_SNAKE_CASE_ , kernel_size=SCREAMING_SNAKE_CASE_ , strides=SCREAMING_SNAKE_CASE_ , padding='''VALID''' , groups=SCREAMING_SNAKE_CASE_ , use_bias=SCREAMING_SNAKE_CASE_ , name='''convolution''' , ) lowercase_ = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' ) lowercase_ = ACTaFN[activation] if activation is not None else tf.identity def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any: lowercase_ = self.convolution(self.padding(SCREAMING_SNAKE_CASE_ ) ) lowercase_ = self.normalization(SCREAMING_SNAKE_CASE_ ) lowercase_ = self.activation(SCREAMING_SNAKE_CASE_ ) return hidden_state class lowercase__( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : str , SCREAMING_SNAKE_CASE_ : RegNetConfig , **SCREAMING_SNAKE_CASE_ : str ) -> Any: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = config.num_channels lowercase_ = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , ) def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : str ) -> Optional[int]: lowercase_ = shape_list(SCREAMING_SNAKE_CASE_ )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) lowercase_ = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 2, 3, 1) ) lowercase_ = self.embedder(SCREAMING_SNAKE_CASE_ ) return hidden_state class lowercase__( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 2 , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Union[str, Any]: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = tf.keras.layers.ConvaD( filters=SCREAMING_SNAKE_CASE_ , kernel_size=1 , strides=SCREAMING_SNAKE_CASE_ , use_bias=SCREAMING_SNAKE_CASE_ , name='''convolution''' ) lowercase_ = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' ) def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : bool = False ) -> tf.Tensor: return self.normalization(self.convolution(SCREAMING_SNAKE_CASE_ ) , training=SCREAMING_SNAKE_CASE_ ) class lowercase__( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : int ) -> Optional[Any]: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=SCREAMING_SNAKE_CASE_ , name='''pooler''' ) lowercase_ = [ tf.keras.layers.ConvaD(filters=SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation='''relu''' , name='''attention.0''' ), tf.keras.layers.ConvaD(filters=SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ), ] def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Dict: # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels] lowercase_ = self.pooler(SCREAMING_SNAKE_CASE_ ) for layer_module in self.attention: lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ ) lowercase_ = hidden_state * pooled return hidden_state class lowercase__( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : str , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 1 , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> List[str]: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = in_channels != out_channels or stride != 1 lowercase_ = max(1 , out_channels // config.groups_width ) lowercase_ = ( TFRegNetShortCut(SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name='''shortcut''' ) if should_apply_shortcut else tf.keras.layers.Activation('''linear''' , name='''shortcut''' ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
lowercase_ = [ TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ), TFRegNetConvLayer( SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , groups=SCREAMING_SNAKE_CASE_ , activation=config.hidden_act , name='''layer.1''' ), TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE_ , name='''layer.2''' ), ] lowercase_ = ACTaFN[config.hidden_act] def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any: lowercase_ = hidden_state for layer_module in self.layers: lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ ) lowercase_ = self.shortcut(SCREAMING_SNAKE_CASE_ ) hidden_state += residual lowercase_ = self.activation(SCREAMING_SNAKE_CASE_ ) return hidden_state class lowercase__( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : Any , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 1 , **SCREAMING_SNAKE_CASE_ : List[str] ) -> int: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = in_channels != out_channels or stride != 1 lowercase_ = max(1 , out_channels // config.groups_width ) lowercase_ = ( TFRegNetShortCut(SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name='''shortcut''' ) if should_apply_shortcut else tf.keras.layers.Activation('''linear''' , name='''shortcut''' ) ) lowercase_ = [ TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ), TFRegNetConvLayer( SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , groups=SCREAMING_SNAKE_CASE_ , activation=config.hidden_act , name='''layer.1''' ), TFRegNetSELayer(SCREAMING_SNAKE_CASE_ , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ), TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE_ , name='''layer.3''' ), ] lowercase_ = ACTaFN[config.hidden_act] def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : Dict ) -> Optional[Any]: lowercase_ = hidden_state for layer_module in self.layers: lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ ) lowercase_ = self.shortcut(SCREAMING_SNAKE_CASE_ ) hidden_state += residual lowercase_ = self.activation(SCREAMING_SNAKE_CASE_ ) return hidden_state class lowercase__( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : str , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 2 , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> List[str]: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer lowercase_ = [ # downsampling is done in the first layer with stride of 2 layer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name='''layers.0''' ), *[layer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , name=f'''layers.{i+1}''' ) for i in range(depth - 1 )], ] def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : int ) -> int: for layer_module in self.layers: lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ ) return hidden_state class lowercase__( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : RegNetConfig , **SCREAMING_SNAKE_CASE_ : Dict ) -> Tuple: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = [] # based on `downsample_in_first_stage`, the 
first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( SCREAMING_SNAKE_CASE_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) ) lowercase_ = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(SCREAMING_SNAKE_CASE_ , config.depths[1:] ) ): self.stages.append(TFRegNetStage(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , depth=SCREAMING_SNAKE_CASE_ , name=f'''stages.{i+1}''' ) ) def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : bool = True ) -> TFBaseModelOutputWithNoAttention: lowercase_ = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: lowercase_ = hidden_states + (hidden_state,) lowercase_ = stage_module(SCREAMING_SNAKE_CASE_ ) if output_hidden_states: lowercase_ = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=SCREAMING_SNAKE_CASE_ , hidden_states=SCREAMING_SNAKE_CASE_ ) @keras_serializable class lowercase__( tf.keras.layers.Layer ): """simple docstring""" a :str = RegNetConfig def __init__( self : str , SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Any: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = config lowercase_ = TFRegNetEmbeddings(SCREAMING_SNAKE_CASE_ , name='''embedder''' ) lowercase_ = TFRegNetEncoder(SCREAMING_SNAKE_CASE_ , name='''encoder''' ) lowercase_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=SCREAMING_SNAKE_CASE_ , name='''pooler''' ) @unpack_inputs def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : bool = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention: lowercase_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict lowercase_ = self.embedder(SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ ) lowercase_ = self.encoder( SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ ) lowercase_ = encoder_outputs[0] lowercase_ = self.pooler(SCREAMING_SNAKE_CASE_ ) # Change to NCHW output format have uniformity in the modules lowercase_ = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2) ) lowercase_ = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: lowercase_ = tuple([tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=SCREAMING_SNAKE_CASE_ , pooler_output=SCREAMING_SNAKE_CASE_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class lowercase__( UpperCAmelCase ): """simple docstring""" a :Tuple = RegNetConfig a :Any = 'regnet' a :List[str] = 'pixel_values' @property def _lowercase ( self : List[str] ) -> str: return {"pixel_values": tf.TensorSpec(shape=(None, 
self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )} __a = r'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n' __a = r'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( 'The bare RegNet model outputting raw features without any specific head on top.' , UpperCAmelCase , ) class lowercase__( UpperCAmelCase ): """simple docstring""" def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : RegNetConfig , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : str ) -> List[str]: super().__init__(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) lowercase_ = TFRegNetMainLayer(SCREAMING_SNAKE_CASE_ , name='''regnet''' ) @unpack_inputs @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[Any]=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]: lowercase_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict lowercase_ = self.regnet( pixel_values=SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( '\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n ' , UpperCAmelCase , ) class lowercase__( UpperCAmelCase , UpperCAmelCase ): """simple docstring""" def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : RegNetConfig , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : int ) -> Union[str, Any]: super().__init__(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) lowercase_ = config.num_labels lowercase_ = TFRegNetMainLayer(SCREAMING_SNAKE_CASE_ , name='''regnet''' ) # classification head lowercase_ = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : tf.Tensor = None , SCREAMING_SNAKE_CASE_ : tf.Tensor = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : Dict=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: lowercase_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict lowercase_ = self.regnet( SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ ) lowercase_ = outputs.pooler_output if return_dict else outputs[1] lowercase_ = self.classifier[0](SCREAMING_SNAKE_CASE_ ) lowercase_ = self.classifier[1](SCREAMING_SNAKE_CASE_ ) lowercase_ = None if labels is None else self.hf_compute_loss(labels=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_ ) if not return_dict: lowercase_ = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_ , hidden_states=outputs.hidden_states )
30
1
from ...configuration_utils import PretrainedConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class lowercase__( UpperCAmelCase ): """simple docstring""" a :Dict = 'megatron-bert' def __init__( self : Any , SCREAMING_SNAKE_CASE_ : Dict=2_9_0_5_6 , SCREAMING_SNAKE_CASE_ : Tuple=1_0_2_4 , SCREAMING_SNAKE_CASE_ : Optional[int]=2_4 , SCREAMING_SNAKE_CASE_ : Tuple=1_6 , SCREAMING_SNAKE_CASE_ : Any=4_0_9_6 , SCREAMING_SNAKE_CASE_ : Any="gelu" , SCREAMING_SNAKE_CASE_ : str=0.1 , SCREAMING_SNAKE_CASE_ : Tuple=0.1 , SCREAMING_SNAKE_CASE_ : int=5_1_2 , SCREAMING_SNAKE_CASE_ : Optional[Any]=2 , SCREAMING_SNAKE_CASE_ : Any=0.02 , SCREAMING_SNAKE_CASE_ : Any=1e-12 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE_ : int="absolute" , SCREAMING_SNAKE_CASE_ : int=True , **SCREAMING_SNAKE_CASE_ : Optional[int] , ) -> Any: super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) lowercase_ = vocab_size lowercase_ = hidden_size lowercase_ = num_hidden_layers lowercase_ = num_attention_heads lowercase_ = hidden_act lowercase_ = intermediate_size lowercase_ = hidden_dropout_prob lowercase_ = attention_probs_dropout_prob lowercase_ = max_position_embeddings lowercase_ = type_vocab_size lowercase_ = initializer_range lowercase_ = layer_norm_eps lowercase_ = position_embedding_type lowercase_ = use_cache
30
import importlib.metadata import warnings from copy import deepcopy from packaging import version from ..utils import logging from .import_utils import is_accelerate_available, is_bitsandbytes_available if is_bitsandbytes_available(): import bitsandbytes as bnb import torch import torch.nn as nn from ..pytorch_utils import ConvaD if is_accelerate_available(): from accelerate import init_empty_weights from accelerate.utils import find_tied_parameters __a = logging.get_logger(__name__) def a ( snake_case__: Optional[int] , snake_case__: Dict , snake_case__: int , snake_case__: List[str]=None , snake_case__: List[Any]=None ): '''simple docstring''' # Recurse if needed if "." in tensor_name: lowercase_ = tensor_name.split('''.''' ) for split in splits[:-1]: lowercase_ = getattr(snake_case__ , snake_case__ ) if new_module is None: raise ValueError(F'''{module} has no attribute {split}.''' ) lowercase_ = new_module lowercase_ = splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(F'''{module} does not have a parameter or a buffer named {tensor_name}.''' ) lowercase_ = tensor_name in module._buffers lowercase_ = getattr(snake_case__ , snake_case__ ) if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None: raise ValueError(F'''{tensor_name} is on the meta device, we need a `value` to put in on {device}.''' ) lowercase_ = False lowercase_ = False if is_buffer or not is_bitsandbytes_available(): lowercase_ = False lowercase_ = False else: lowercase_ = hasattr(bnb.nn , '''Params4bit''' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit ) lowercase_ = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams ) if is_abit or is_abit: lowercase_ = module._parameters[tensor_name] if param.device.type != "cuda": if value is None: lowercase_ = old_value.to(snake_case__ ) elif isinstance(snake_case__ , torch.Tensor ): lowercase_ = value.to('''cpu''' ) if value.dtype == torch.inta: lowercase_ = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse( '''0.37.2''' ) if not is_abit_serializable: raise ValueError( '''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. ''' '''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' ) else: lowercase_ = torch.tensor(snake_case__ , device='''cpu''' ) # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. 
if issubclass(module.source_cls , snake_case__ ) and fpaa_statistics is None: lowercase_ = new_value.T lowercase_ = old_value.__dict__ if is_abit: lowercase_ = bnb.nn.IntaParams(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ ) elif is_abit: lowercase_ = bnb.nn.Paramsabit(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ ) lowercase_ = new_value if fpaa_statistics is not None: setattr(module.weight , '''SCB''' , fpaa_statistics.to(snake_case__ ) ) else: if value is None: lowercase_ = old_value.to(snake_case__ ) elif isinstance(snake_case__ , torch.Tensor ): lowercase_ = value.to(snake_case__ ) else: lowercase_ = torch.tensor(snake_case__ , device=snake_case__ ) if is_buffer: lowercase_ = new_value else: lowercase_ = nn.Parameter(snake_case__ , requires_grad=old_value.requires_grad ) lowercase_ = new_value def a ( snake_case__: str , snake_case__: Union[str, Any]=None , snake_case__: Any=None , snake_case__: List[str]=None , snake_case__: Optional[Any]=False ): '''simple docstring''' for name, module in model.named_children(): if current_key_name is None: lowercase_ = [] current_key_name.append(snake_case__ ) if (isinstance(snake_case__ , nn.Linear ) or isinstance(snake_case__ , snake_case__ )) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` if not any(key in '''.'''.join(snake_case__ ) for key in modules_to_not_convert ): with init_empty_weights(): if isinstance(snake_case__ , snake_case__ ): lowercase_ , lowercase_ = module.weight.shape else: lowercase_ = module.in_features lowercase_ = module.out_features if quantization_config.quantization_method() == "llm_int8": lowercase_ = bnb.nn.LinearabitLt( snake_case__ , snake_case__ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , ) lowercase_ = True else: if ( quantization_config.llm_inta_skip_modules is not None and name in quantization_config.llm_inta_skip_modules ): pass else: lowercase_ = bnb.nn.Linearabit( snake_case__ , snake_case__ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , ) lowercase_ = True # Store the module class in case we need to transpose the weight later lowercase_ = type(snake_case__ ) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(snake_case__ ) if len(list(module.children() ) ) > 0: lowercase_ , lowercase_ = _replace_with_bnb_linear( snake_case__ , snake_case__ , snake_case__ , snake_case__ , has_been_replaced=snake_case__ , ) # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def a ( snake_case__: Any , snake_case__: Any=None , snake_case__: Union[str, Any]=None , snake_case__: str=None ): '''simple docstring''' lowercase_ = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert lowercase_ , lowercase_ = _replace_with_bnb_linear( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) if not has_been_replaced: logger.warning( '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.''' ''' Please double check your model architecture, or submit an issue on github if you think this is''' ''' a bug.''' ) return model def a ( *snake_case__: str , **snake_case__: Dict ): '''simple docstring''' warnings.warn( 
'''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' , snake_case__ , ) return replace_with_bnb_linear(*snake_case__ , **snake_case__ ) def a ( *snake_case__: Any , **snake_case__: List[Any] ): '''simple docstring''' warnings.warn( '''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' , snake_case__ , ) return set_module_quantized_tensor_to_device(*snake_case__ , **snake_case__ ) def a ( snake_case__: Optional[Any] ): '''simple docstring''' lowercase_ = deepcopy(snake_case__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_model.tie_weights() lowercase_ = find_tied_parameters(snake_case__ ) # For compatibility with Accelerate < 0.18 if isinstance(snake_case__ , snake_case__ ): lowercase_ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: lowercase_ = sum(snake_case__ , [] ) lowercase_ = len(snake_case__ ) > 0 # Check if it is a base model lowercase_ = not hasattr(snake_case__ , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head lowercase_ = list(model.named_children() ) lowercase_ = [list_modules[-1][0]] # add last module together with tied weights lowercase_ = set(snake_case__ ) - set(snake_case__ ) lowercase_ = list(set(snake_case__ ) ) + list(snake_case__ ) # remove ".weight" from the keys lowercase_ = ['''.weight''', '''.bias'''] lowercase_ = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: lowercase_ = name.replace(snake_case__ , '''''' ) filtered_module_names.append(snake_case__ ) return filtered_module_names
30
1
def solution(n: int = 4_000_000) -> int:
    """Return the sum of the even-valued Fibonacci terms that do not exceed n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{solution() = }")
30
import argparse import os import re __a = 'src/transformers/models/auto' # re pattern that matches mapping introductions: # SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict __a = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict') # re pattern that matches identifiers in mappings __a = re.compile(r'\s*\(\s*"(\S[^"]+)"') def a ( snake_case__: str , snake_case__: bool = False ): '''simple docstring''' with open(snake_case__ , '''r''' , encoding='''utf-8''' ) as f: lowercase_ = f.read() lowercase_ = content.split('''\n''' ) lowercase_ = [] lowercase_ = 0 while line_idx < len(snake_case__ ): if _re_intro_mapping.search(lines[line_idx] ) is not None: lowercase_ = len(re.search(r'''^(\s*)\S''' , lines[line_idx] ).groups()[0] ) + 8 # Start of a new mapping! while not lines[line_idx].startswith(''' ''' * indent + '''(''' ): new_lines.append(lines[line_idx] ) line_idx += 1 lowercase_ = [] while lines[line_idx].strip() != "]": # Blocks either fit in one line or not if lines[line_idx].strip() == "(": lowercase_ = line_idx while not lines[line_idx].startswith(''' ''' * indent + ''')''' ): line_idx += 1 blocks.append('''\n'''.join(lines[start_idx : line_idx + 1] ) ) else: blocks.append(lines[line_idx] ) line_idx += 1 # Sort blocks by their identifiers lowercase_ = sorted(snake_case__ , key=lambda snake_case__ : _re_identifier.search(snake_case__ ).groups()[0] ) new_lines += blocks else: new_lines.append(lines[line_idx] ) line_idx += 1 if overwrite: with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f: f.write('''\n'''.join(snake_case__ ) ) elif "\n".join(snake_case__ ) != content: return True def a ( snake_case__: bool = False ): '''simple docstring''' lowercase_ = [os.path.join(snake_case__ , snake_case__ ) for f in os.listdir(snake_case__ ) if f.endswith('''.py''' )] lowercase_ = [sort_auto_mapping(snake_case__ , overwrite=snake_case__ ) for fname in fnames] if not overwrite and any(snake_case__ ): lowercase_ = [f for f, d in zip(snake_case__ , snake_case__ ) if d] raise ValueError( F'''The following files have auto mappings that need sorting: {', '.join(snake_case__ )}. Run `make style` to fix''' ''' this.''' ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.') __a = parser.parse_args() sort_all_auto_mappings(not args.check_only)
30
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __a = { 'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig'] } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['RemBertTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['RemBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'RemBertForCausalLM', 'RemBertForMaskedLM', 'RemBertForMultipleChoice', 'RemBertForQuestionAnswering', 'RemBertForSequenceClassification', 'RemBertForTokenClassification', 'RemBertLayer', 'RemBertModel', 'RemBertPreTrainedModel', 'load_tf_weights_in_rembert', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFRemBertForCausalLM', 'TFRemBertForMaskedLM', 'TFRemBertForMultipleChoice', 'TFRemBertForQuestionAnswering', 'TFRemBertForSequenceClassification', 'TFRemBertForTokenClassification', 'TFRemBertLayer', 'TFRemBertModel', 'TFRemBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_rembert import RemBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_rembert_fast import RemBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rembert import ( REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, RemBertForCausalLM, RemBertForMaskedLM, RemBertForMultipleChoice, RemBertForQuestionAnswering, RemBertForSequenceClassification, RemBertForTokenClassification, RemBertLayer, RemBertModel, RemBertPreTrainedModel, load_tf_weights_in_rembert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rembert import ( TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFRemBertForCausalLM, TFRemBertForMaskedLM, TFRemBertForMultipleChoice, TFRemBertForQuestionAnswering, TFRemBertForSequenceClassification, TFRemBertForTokenClassification, TFRemBertLayer, TFRemBertModel, TFRemBertPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
30
def knapsack(values: list, weights: list, number_of_items: int, max_weight: int, index: int) -> int:
    """Recursive 0/1 knapsack: return the maximum value achievable from items[index:] within max_weight."""
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    # Option 1: skip the current item.
    ans1 = knapsack(values, weights, number_of_items, max_weight, index + 1)
    # Option 2: take the current item if it fits in the remaining capacity.
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            values, weights, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
30
1
from ....configuration_utils import PretrainedConfig from ....utils import logging __a = logging.get_logger(__name__) __a = { 'Visual-Attention-Network/van-base': ( 'https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json' ), } class lowercase__( UpperCAmelCase ): """simple docstring""" a :str = 'van' def __init__( self : int , SCREAMING_SNAKE_CASE_ : int=2_2_4 , SCREAMING_SNAKE_CASE_ : Optional[Any]=3 , SCREAMING_SNAKE_CASE_ : List[str]=[7, 3, 3, 3] , SCREAMING_SNAKE_CASE_ : Optional[Any]=[4, 2, 2, 2] , SCREAMING_SNAKE_CASE_ : List[Any]=[6_4, 1_2_8, 3_2_0, 5_1_2] , SCREAMING_SNAKE_CASE_ : Union[str, Any]=[3, 3, 1_2, 3] , SCREAMING_SNAKE_CASE_ : Union[str, Any]=[8, 8, 4, 4] , SCREAMING_SNAKE_CASE_ : Dict="gelu" , SCREAMING_SNAKE_CASE_ : Optional[int]=0.02 , SCREAMING_SNAKE_CASE_ : Optional[int]=1e-6 , SCREAMING_SNAKE_CASE_ : Any=1e-2 , SCREAMING_SNAKE_CASE_ : Tuple=0.0 , SCREAMING_SNAKE_CASE_ : Tuple=0.0 , **SCREAMING_SNAKE_CASE_ : int , ) -> Union[str, Any]: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = image_size lowercase_ = num_channels lowercase_ = patch_sizes lowercase_ = strides lowercase_ = hidden_sizes lowercase_ = depths lowercase_ = mlp_ratios lowercase_ = hidden_act lowercase_ = initializer_range lowercase_ = layer_norm_eps lowercase_ = layer_scale_init_value lowercase_ = drop_path_rate lowercase_ = dropout_rate
30
import argparse from collections import defaultdict import yaml __a = 'docs/source/en/_toctree.yml' def a ( snake_case__: Dict ): '''simple docstring''' lowercase_ = defaultdict(snake_case__ ) for doc in model_doc: counts[doc["local"]] += 1 lowercase_ = [key for key, value in counts.items() if value > 1] lowercase_ = [] for duplicate_key in duplicates: lowercase_ = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} ) if len(snake_case__ ) > 1: raise ValueError( F'''{duplicate_key} is present several times in the documentation table of content at ''' '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the ''' '''others.''' ) # Only add this once new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] ) # Sort return sorted(snake_case__ , key=lambda snake_case__ : s["title"].lower() ) def a ( snake_case__: List[Any]=False ): '''simple docstring''' with open(snake_case__ , encoding='''utf-8''' ) as f: lowercase_ = yaml.safe_load(f.read() ) # Get to the API doc lowercase_ = 0 while content[api_idx]["title"] != "API": api_idx += 1 lowercase_ = content[api_idx]['''sections'''] # Then to the model doc lowercase_ = 0 while api_doc[model_idx]["title"] != "Models": model_idx += 1 lowercase_ = api_doc[model_idx]['''sections'''] lowercase_ = [(idx, section) for idx, section in enumerate(snake_case__ ) if '''sections''' in section] lowercase_ = False for idx, modality_doc in modalities_docs: lowercase_ = modality_doc['''sections'''] lowercase_ = clean_model_doc_toc(snake_case__ ) if old_modality_doc != new_modality_doc: lowercase_ = True if overwrite: lowercase_ = new_modality_doc if diff: if overwrite: lowercase_ = model_doc lowercase_ = api_doc with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f: f.write(yaml.dump(snake_case__ , allow_unicode=snake_case__ ) ) else: raise ValueError( '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') __a = parser.parse_args() check_model_doc(args.fix_and_overwrite)
30
1
import argparse import OmegaConf import torch from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel def a ( snake_case__: Any , snake_case__: Union[str, Any] , snake_case__: Optional[Any] ): '''simple docstring''' lowercase_ = OmegaConf.load(snake_case__ ) lowercase_ = torch.load(snake_case__ , map_location='''cpu''' )['''model'''] lowercase_ = list(state_dict.keys() ) # extract state_dict for VQVAE lowercase_ = {} lowercase_ = '''first_stage_model.''' for key in keys: if key.startswith(snake_case__ ): lowercase_ = state_dict[key] # extract state_dict for UNetLDM lowercase_ = {} lowercase_ = '''model.diffusion_model.''' for key in keys: if key.startswith(snake_case__ ): lowercase_ = state_dict[key] lowercase_ = config.model.params.first_stage_config.params lowercase_ = config.model.params.unet_config.params lowercase_ = VQModel(**snake_case__ ).eval() vqvae.load_state_dict(snake_case__ ) lowercase_ = UNetLDMModel(**snake_case__ ).eval() unet.load_state_dict(snake_case__ ) lowercase_ = DDIMScheduler( timesteps=config.model.params.timesteps , beta_schedule='''scaled_linear''' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=snake_case__ , ) lowercase_ = LDMPipeline(snake_case__ , snake_case__ , snake_case__ ) pipeline.save_pretrained(snake_case__ ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument('--checkpoint_path', type=str, required=True) parser.add_argument('--config_path', type=str, required=True) parser.add_argument('--output_path', type=str, required=True) __a = parser.parse_args() convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
30
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING __a = logging.get_logger(__name__) class lowercase__( UpperCAmelCase ): """simple docstring""" a :Union[str, Any] = 'upernet' def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : Dict=None , SCREAMING_SNAKE_CASE_ : str=5_1_2 , SCREAMING_SNAKE_CASE_ : Tuple=0.02 , SCREAMING_SNAKE_CASE_ : Optional[Any]=[1, 2, 3, 6] , SCREAMING_SNAKE_CASE_ : Optional[int]=True , SCREAMING_SNAKE_CASE_ : Tuple=0.4 , SCREAMING_SNAKE_CASE_ : Optional[int]=3_8_4 , SCREAMING_SNAKE_CASE_ : str=2_5_6 , SCREAMING_SNAKE_CASE_ : Dict=1 , SCREAMING_SNAKE_CASE_ : Tuple=False , SCREAMING_SNAKE_CASE_ : str=2_5_5 , **SCREAMING_SNAKE_CASE_ : str , ) -> int: super().__init__(**SCREAMING_SNAKE_CASE_ ) if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) lowercase_ = CONFIG_MAPPING['''resnet'''](out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] ) elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase_ = backbone_config.get('''model_type''' ) lowercase_ = CONFIG_MAPPING[backbone_model_type] lowercase_ = config_class.from_dict(SCREAMING_SNAKE_CASE_ ) lowercase_ = backbone_config lowercase_ = hidden_size lowercase_ = initializer_range lowercase_ = pool_scales lowercase_ = use_auxiliary_head lowercase_ = auxiliary_loss_weight lowercase_ = auxiliary_in_channels lowercase_ = auxiliary_channels lowercase_ = auxiliary_num_convs lowercase_ = auxiliary_concat_input lowercase_ = loss_ignore_index def _lowercase ( self : List[str] ) -> List[str]: lowercase_ = copy.deepcopy(self.__dict__ ) lowercase_ = self.backbone_config.to_dict() lowercase_ = self.__class__.model_type return output
30
1
import argparse import torch from ...utils import logging from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert logging.set_verbosity_info() def a ( snake_case__: Optional[int] , snake_case__: Tuple , snake_case__: List[str] ): '''simple docstring''' # Initialise PyTorch model lowercase_ = AlbertConfig.from_json_file(snake_case__ ) print(F'''Building PyTorch model from configuration: {config}''' ) lowercase_ = AlbertForPreTraining(snake_case__ ) # Load weights from tf checkpoint load_tf_weights_in_albert(snake_case__ , snake_case__ , snake_case__ ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , snake_case__ ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--albert_config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained ALBERT model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) __a = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
30
import tempfile import unittest from make_student import create_student_by_copying_alternating_layers from transformers import AutoConfig from transformers.file_utils import cached_property from transformers.testing_utils import require_torch __a = 'sshleifer/bart-tiny-random' __a = 'patrickvonplaten/t5-tiny-random' @require_torch class lowercase__( unittest.TestCase ): """simple docstring""" @cached_property def _lowercase ( self : Any ) -> Tuple: return AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : int ) -> List[Any]: lowercase_ , *lowercase_ = create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.num_hidden_layers , 1 ) def _lowercase ( self : Union[str, Any] ) -> Optional[int]: lowercase_ , *lowercase_ = create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=1 , d=SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : List[str] ) -> List[Any]: lowercase_ , *lowercase_ = create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=1 , d=SCREAMING_SNAKE_CASE_ ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers ) def _lowercase ( self : List[Any] ) -> List[Any]: lowercase_ , *lowercase_ = create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , 1 ) def _lowercase ( self : Union[str, Any] ) -> Optional[int]: with self.assertRaises(SCREAMING_SNAKE_CASE_ ): create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=SCREAMING_SNAKE_CASE_ , d=SCREAMING_SNAKE_CASE_ )
30
1
import collections import os import re from pathlib import Path __a = 'src/transformers' # Matches is_xxx_available() __a = re.compile(r'is\_([a-z_]*)_available()') # Catches a one-line _import_struct = {xxx} __a = re.compile(r'^_import_structure\s+=\s+\{([^\}]+)\}') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] __a = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]') # Catches a line if not is_foo_available __a = re.compile(r'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)') # Catches a line _import_struct["bla"].append("foo") __a = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] __a = re.compile(r'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]') # Catches a line with an object between quotes and a comma: "MyModel", __a = re.compile(r'^\s+"([^"]+)",') # Catches a line with objects between brackets only: ["foo", "bar"], __a = re.compile(r'^\s+\[([^\]]+)\]') # Catches a line with from foo import bar, bla, boo __a = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') # Catches a line with try: __a = re.compile(r'^\s*try:') # Catches a line with else: __a = re.compile(r'^\s*else:') def a ( snake_case__: Union[str, Any] ): '''simple docstring''' if _re_test_backend.search(snake_case__ ) is None: return None lowercase_ = [b[0] for b in _re_backend.findall(snake_case__ )] backends.sort() return "_and_".join(snake_case__ ) def a ( snake_case__: Optional[Any] ): '''simple docstring''' with open(snake_case__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: lowercase_ = f.readlines() lowercase_ = 0 while line_index < len(snake_case__ ) and not lines[line_index].startswith('''_import_structure = {''' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(snake_case__ ): return None # First grab the objects without a specific backend in _import_structure lowercase_ = [] while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None: lowercase_ = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(snake_case__ ): lowercase_ = _re_one_line_import_struct.search(snake_case__ ).groups()[0] lowercase_ = re.findall(r'''\[([^\]]+)\]''' , snake_case__ ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] ) line_index += 1 continue lowercase_ = _re_import_struct_key_value.search(snake_case__ ) if single_line_import_search is not None: lowercase_ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(snake_case__ ) > 0] objects.extend(snake_case__ ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) line_index += 1 lowercase_ = {'''none''': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('''if TYPE_CHECKING''' ): # If the line is an if not is_backend_available, we grab all objects associated. 
lowercase_ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: lowercase_ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 lowercase_ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ): lowercase_ = lines[line_index] if _re_import_struct_add_one.search(snake_case__ ) is not None: objects.append(_re_import_struct_add_one.search(snake_case__ ).groups()[0] ) elif _re_import_struct_add_many.search(snake_case__ ) is not None: lowercase_ = _re_import_struct_add_many.search(snake_case__ ).groups()[0].split(''', ''' ) lowercase_ = [obj[1:-1] for obj in imports if len(snake_case__ ) > 0] objects.extend(snake_case__ ) elif _re_between_brackets.search(snake_case__ ) is not None: lowercase_ = _re_between_brackets.search(snake_case__ ).groups()[0].split(''', ''' ) lowercase_ = [obj[1:-1] for obj in imports if len(snake_case__ ) > 0] objects.extend(snake_case__ ) elif _re_quote_object.search(snake_case__ ) is not None: objects.append(_re_quote_object.search(snake_case__ ).groups()[0] ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) elif line.startswith(''' ''' * 12 + '''"''' ): objects.append(line[13:-3] ) line_index += 1 lowercase_ = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend lowercase_ = [] while ( line_index < len(snake_case__ ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('''else''' ) ): lowercase_ = lines[line_index] lowercase_ = _re_import.search(snake_case__ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 lowercase_ = {'''none''': objects} # Let's continue with backend-specific objects while line_index < len(snake_case__ ): # If the line is an if is_backend_available, we grab all objects associated. 
lowercase_ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: lowercase_ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 lowercase_ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ): lowercase_ = lines[line_index] lowercase_ = _re_import.search(snake_case__ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 12 ): objects.append(line[12:-2] ) line_index += 1 lowercase_ = objects else: line_index += 1 return import_dict_objects, type_hint_objects def a ( snake_case__: Any , snake_case__: Tuple ): '''simple docstring''' def find_duplicates(snake_case__: Optional[int] ): return [k for k, v in collections.Counter(snake_case__ ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] lowercase_ = [] for key in import_dict_objects.keys(): lowercase_ = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) lowercase_ = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): lowercase_ = '''base imports''' if key == '''none''' else F'''{key} backend''' errors.append(F'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def a ( ): '''simple docstring''' lowercase_ = [] for root, _, files in os.walk(snake_case__ ): if "__init__.py" in files: lowercase_ = os.path.join(snake_case__ , '''__init__.py''' ) lowercase_ = parse_init(snake_case__ ) if objects is not None: lowercase_ = analyze_results(*snake_case__ ) if len(snake_case__ ) > 0: lowercase_ = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append('''\n'''.join(snake_case__ ) ) if len(snake_case__ ) > 0: raise ValueError('''\n\n'''.join(snake_case__ ) ) def a ( ): '''simple docstring''' lowercase_ = [] for path, directories, files in os.walk(snake_case__ ): for folder in directories: # Ignore private modules if folder.startswith('''_''' ): directories.remove(snake_case__ ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(snake_case__ ) / folder).glob('''*.py''' ) ) ) == 0: continue lowercase_ = str((Path(snake_case__ ) / folder).relative_to(snake_case__ ) ) lowercase_ = short_path.replace(os.path.sep , '''.''' ) submodules.append(snake_case__ ) for fname in files: if fname == "__init__.py": continue lowercase_ = str((Path(snake_case__ ) / fname).relative_to(snake_case__ ) ) lowercase_ = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' ) if len(submodule.split('''.''' ) ) == 1: submodules.append(snake_case__ ) return submodules __a = [ 'convert_pytorch_checkpoint_to_tf2', 
'modeling_flax_pytorch_utils', 'models.esm.openfold_utils', ] def a ( ): '''simple docstring''' # This is to make sure the transformers module imported is the one in the repo. from transformers.utils import direct_transformers_import lowercase_ = direct_transformers_import(snake_case__ ) lowercase_ = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentiall re-) add them. with open(os.path.join(snake_case__ , '''__init__.py''' ) , '''r''' ) as f: lowercase_ = f.read() import_structure_keys.update(set(re.findall(r'''import_structure\[\"([^\"]*)\"\]''' , snake_case__ ) ) ) lowercase_ = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(snake_case__ ) > 0: lowercase_ = '''\n'''.join(F'''- {module}''' for module in module_not_registered ) raise ValueError( '''The following submodules are not properly registed in the main init of Transformers:\n''' F'''{list_of_modules}\n''' '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' ) if __name__ == "__main__": check_all_inits() check_submodules()
30
def solution(n: int = 100) -> int:
    """Return the difference between the sum of the cubes and the sum of the squares of the first n natural numbers."""
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares


if __name__ == "__main__":
    print(f"{solution() = }")
30
1
def solution(n: int = 600_851_475_143) -> int:
    """Return the largest prime factor of n."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
30
import logging from transformers.configuration_utils import PretrainedConfig __a = logging.getLogger(__name__) class lowercase__( UpperCAmelCase ): """simple docstring""" a :Optional[int] = 'masked_bert' def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any]=3_0_5_2_2 , SCREAMING_SNAKE_CASE_ : List[str]=7_6_8 , SCREAMING_SNAKE_CASE_ : Optional[int]=1_2 , SCREAMING_SNAKE_CASE_ : Any=1_2 , SCREAMING_SNAKE_CASE_ : str=3_0_7_2 , SCREAMING_SNAKE_CASE_ : Union[str, Any]="gelu" , SCREAMING_SNAKE_CASE_ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE_ : List[str]=0.1 , SCREAMING_SNAKE_CASE_ : Tuple=5_1_2 , SCREAMING_SNAKE_CASE_ : str=2 , SCREAMING_SNAKE_CASE_ : Dict=0.02 , SCREAMING_SNAKE_CASE_ : Any=1e-12 , SCREAMING_SNAKE_CASE_ : Any=0 , SCREAMING_SNAKE_CASE_ : Optional[int]="topK" , SCREAMING_SNAKE_CASE_ : Dict="constant" , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.0 , **SCREAMING_SNAKE_CASE_ : Optional[int] , ) -> Optional[Any]: super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) lowercase_ = vocab_size lowercase_ = hidden_size lowercase_ = num_hidden_layers lowercase_ = num_attention_heads lowercase_ = hidden_act lowercase_ = intermediate_size lowercase_ = hidden_dropout_prob lowercase_ = attention_probs_dropout_prob lowercase_ = max_position_embeddings lowercase_ = type_vocab_size lowercase_ = initializer_range lowercase_ = layer_norm_eps lowercase_ = pruning_method lowercase_ = mask_init lowercase_ = mask_scale
30
1
import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def a ( snake_case__: List[str] ): '''simple docstring''' lowercase_ = {} lowercase_ = tokenizer(example['''content'''] , truncation=snake_case__ )['''input_ids'''] lowercase_ = len(example['''content'''] ) / len(output['''input_ids'''] ) return output __a = HfArgumentParser(PretokenizationArguments) __a = parser.parse_args() if args.num_workers is None: __a = multiprocessing.cpu_count() __a = AutoTokenizer.from_pretrained(args.tokenizer_dir) __a = time.time() __a = load_dataset(args.dataset_name, split='train') print(f"Dataset loaded in {time.time()-t_start:.2f}s") __a = time.time() __a = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ 'repo_name', 'path', 'copies', 'size', 'content', 'license', 'hash', 'line_mean', 'line_max', 'alpha_frac', 'autogenerated', ], ) print(f"Dataset tokenized in {time.time()-t_start:.2f}s") __a = time.time() ds.push_to_hub(args.tokenized_data_repo) print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
30
import os


def solution() -> str:
    """Return the first ten digits of the sum of the numbers listed in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
30
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __a = { 'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig'] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['ConvNextFeatureExtractor'] __a = ['ConvNextImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'ConvNextForImageClassification', 'ConvNextModel', 'ConvNextPreTrainedModel', 'ConvNextBackbone', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'TFConvNextForImageClassification', 'TFConvNextModel', 'TFConvNextPreTrainedModel', ] if TYPE_CHECKING: from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_convnext import ConvNextFeatureExtractor from .image_processing_convnext import ConvNextImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convnext import ( CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvNextBackbone, ConvNextForImageClassification, ConvNextModel, ConvNextPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel else: import sys __a = _LazyModule(__name__, globals()['__file__'], _import_structure)
30
import os import zipfile import pytest from datasets.utils.extract import ( BzipaExtractor, Extractor, GzipExtractor, LzaExtractor, SevenZipExtractor, TarExtractor, XzExtractor, ZipExtractor, ZstdExtractor, ) from .utils import require_lza, require_pyazr, require_zstandard @pytest.mark.parametrize( '''compression_format, is_archive''' , [ ('''7z''', True), ('''bz2''', False), ('''gzip''', False), ('''lz4''', False), ('''tar''', True), ('''xz''', False), ('''zip''', True), ('''zstd''', False), ] , ) def a ( snake_case__: int , snake_case__: Tuple , snake_case__: Dict , snake_case__: Dict , snake_case__: List[Any] , snake_case__: int , snake_case__: List[Any] , snake_case__: Optional[int] , snake_case__: str , snake_case__: Union[str, Any] , snake_case__: List[str] , snake_case__: int , ): '''simple docstring''' lowercase_ = { '''7z''': (seven_zip_file, SevenZipExtractor), '''bz2''': (bza_file, BzipaExtractor), '''gzip''': (gz_file, GzipExtractor), '''lz4''': (lza_file, LzaExtractor), '''tar''': (tar_file, TarExtractor), '''xz''': (xz_file, XzExtractor), '''zip''': (zip_file, ZipExtractor), '''zstd''': (zstd_file, ZstdExtractor), } lowercase_ , lowercase_ = input_paths_and_base_extractors[compression_format] if input_path is None: lowercase_ = F'''for \'{compression_format}\' compression_format, ''' if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(snake_case__ ) assert base_extractor.is_extractable(snake_case__ ) lowercase_ = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''') base_extractor.extract(snake_case__ , snake_case__ ) if is_archive: assert output_path.is_dir() for file_path in output_path.iterdir(): assert file_path.name == text_file.name lowercase_ = file_path.read_text(encoding='''utf-8''' ) else: lowercase_ = output_path.read_text(encoding='''utf-8''' ) lowercase_ = text_file.read_text(encoding='''utf-8''' ) assert extracted_file_content == expected_file_content @pytest.mark.parametrize( '''compression_format, is_archive''' , [ ('''7z''', True), ('''bz2''', False), ('''gzip''', False), ('''lz4''', False), ('''tar''', True), ('''xz''', False), ('''zip''', True), ('''zstd''', False), ] , ) def a ( snake_case__: List[Any] , snake_case__: int , snake_case__: Optional[int] , snake_case__: Union[str, Any] , snake_case__: List[Any] , snake_case__: Tuple , snake_case__: Optional[int] , snake_case__: List[str] , snake_case__: Union[str, Any] , snake_case__: Tuple , snake_case__: int , snake_case__: Optional[int] , ): '''simple docstring''' lowercase_ = { '''7z''': seven_zip_file, '''bz2''': bza_file, '''gzip''': gz_file, '''lz4''': lza_file, '''tar''': tar_file, '''xz''': xz_file, '''zip''': zip_file, '''zstd''': zstd_file, } lowercase_ = input_paths[compression_format] if input_path is None: lowercase_ = F'''for \'{compression_format}\' compression_format, ''' if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(snake_case__ ) lowercase_ = Extractor.infer_extractor_format(snake_case__ ) assert extractor_format is not None lowercase_ = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''') Extractor.extract(snake_case__ , snake_case__ , snake_case__ ) if is_archive: assert output_path.is_dir() 
for file_path in output_path.iterdir(): assert file_path.name == text_file.name lowercase_ = file_path.read_text(encoding='''utf-8''' ) else: lowercase_ = output_path.read_text(encoding='''utf-8''' ) lowercase_ = text_file.read_text(encoding='''utf-8''' ) assert extracted_file_content == expected_file_content @pytest.fixture def a ( snake_case__: Union[str, Any] , snake_case__: List[Any] ): '''simple docstring''' import tarfile lowercase_ = tmp_path / '''data_dot_dot''' directory.mkdir() lowercase_ = directory / '''tar_file_with_dot_dot.tar''' with tarfile.TarFile(snake_case__ , '''w''' ) as f: f.add(snake_case__ , arcname=os.path.join('''..''' , text_file.name ) ) return path @pytest.fixture def a ( snake_case__: int ): '''simple docstring''' import tarfile lowercase_ = tmp_path / '''data_sym_link''' directory.mkdir() lowercase_ = directory / '''tar_file_with_sym_link.tar''' os.symlink('''..''' , directory / '''subdir''' , target_is_directory=snake_case__ ) with tarfile.TarFile(snake_case__ , '''w''' ) as f: f.add(str(directory / '''subdir''' ) , arcname='''subdir''' ) # str required by os.readlink on Windows and Python < 3.8 return path @pytest.mark.parametrize( '''insecure_tar_file, error_log''' , [('''tar_file_with_dot_dot''', '''illegal path'''), ('''tar_file_with_sym_link''', '''Symlink''')] , ) def a ( snake_case__: List[Any] , snake_case__: Optional[int] , snake_case__: List[str] , snake_case__: List[str] , snake_case__: int , snake_case__: Optional[Any] ): '''simple docstring''' lowercase_ = { '''tar_file_with_dot_dot''': tar_file_with_dot_dot, '''tar_file_with_sym_link''': tar_file_with_sym_link, } lowercase_ = insecure_tar_files[insecure_tar_file] lowercase_ = tmp_path / '''extracted''' TarExtractor.extract(snake_case__ , snake_case__ ) assert caplog.text for record in caplog.records: assert record.levelname == "ERROR" assert error_log in record.msg def a ( snake_case__: Optional[int] ): '''simple docstring''' # We should have less false positives than zipfile.is_zipfile # We do that by checking only the magic number lowercase_ = tmpdir / '''not_a_zip_file''' # From: https://github.com/python/cpython/pull/5053 lowercase_ = ( B'''\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00''' B'''\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I''' B'''DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07''' B'''\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82''' ) with not_a_zip_file.open('''wb''' ) as f: f.write(snake_case__ ) assert zipfile.is_zipfile(str(snake_case__ ) ) # is a false positive for `zipfile` assert not ZipExtractor.is_extractable(snake_case__ ) # but we're right
30
1
import math


def solution(n: int = 100) -> int:
    """Returns the difference between the square of the sum and the sum of the
    squares of the first ``n`` natural numbers."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
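A quick cross-check of the row above, added here as a sketch (the function name `solution_closed_form` is ours, not part of the original file): the same difference follows from the closed-form sums 1 + ... + n = n(n + 1)/2 and 1^2 + ... + n^2 = n(n + 1)(2n + 1)/6.

def solution_closed_form(n: int = 100) -> int:
    # Closed-form version of the same computation: square of the sum minus
    # the sum of the squares of the first n natural numbers.
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares


assert solution_closed_form(10) == 2640  # known value for n = 10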
30
from __future__ import annotations def a ( snake_case__: list[int] , snake_case__: int , snake_case__: int , snake_case__: int ): '''simple docstring''' if (direction == 1 and array[indexa] > array[indexa]) or ( direction == 0 and array[indexa] < array[indexa] ): lowercase_ , lowercase_ = array[indexa], array[indexa] def a ( snake_case__: list[int] , snake_case__: int , snake_case__: int , snake_case__: int ): '''simple docstring''' if length > 1: lowercase_ = int(length / 2 ) for i in range(snake_case__ , low + middle ): comp_and_swap(snake_case__ , snake_case__ , i + middle , snake_case__ ) bitonic_merge(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) bitonic_merge(snake_case__ , low + middle , snake_case__ , snake_case__ ) def a ( snake_case__: list[int] , snake_case__: int , snake_case__: int , snake_case__: int ): '''simple docstring''' if length > 1: lowercase_ = int(length / 2 ) bitonic_sort(snake_case__ , snake_case__ , snake_case__ , 1 ) bitonic_sort(snake_case__ , low + middle , snake_case__ , 0 ) bitonic_merge(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) if __name__ == "__main__": __a = input('Enter numbers separated by a comma:\n').strip() __a = [int(item.strip()) for item in user_input.split(',')] bitonic_sort(unsorted, 0, len(unsorted), 1) print('\nSorted array in ascending order is: ', end='') print(*unsorted, sep=', ') bitonic_merge(unsorted, 0, len(unsorted), 0) print('Sorted array in descending order is: ', end='') print(*unsorted, sep=', ')
30
1
from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS __a = logging.get_logger(__name__) __a = { 'linear': get_linear_schedule_with_warmup, 'cosine': get_cosine_schedule_with_warmup, 'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup, 'polynomial': get_polynomial_decay_schedule_with_warmup, 'constant': get_constant_schedule, 'constant_w_warmup': get_constant_schedule_with_warmup, } class lowercase__( UpperCAmelCase ): """simple docstring""" def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> List[str]: super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) if config is None: assert isinstance(self.model , SCREAMING_SNAKE_CASE_ ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" f''' {self.model.__class__}''' ) lowercase_ = self.model.config else: lowercase_ = config lowercase_ = data_args lowercase_ = self.config.tgt_vocab_size if isinstance(self.config , SCREAMING_SNAKE_CASE_ ) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( f'''The `config.pad_token_id` is `None`. 
Using `config.eos_token_id` = {self.config.eos_token_id} for''' ''' padding..''' ) if self.args.label_smoothing == 0: lowercase_ = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss lowercase_ = label_smoothed_nll_loss def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : int ) -> Optional[Any]: if self.optimizer is None: lowercase_ = ['''bias''', '''LayerNorm.weight'''] lowercase_ = [ { '''params''': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], '''weight_decay''': self.args.weight_decay, }, { '''params''': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0, }, ] lowercase_ = Adafactor if self.args.adafactor else AdamW if self.args.adafactor: lowercase_ = Adafactor lowercase_ = {'''scale_parameter''': False, '''relative_step''': False} else: lowercase_ = AdamW lowercase_ = { '''betas''': (self.args.adam_betaa, self.args.adam_betaa), '''eps''': self.args.adam_epsilon, } lowercase_ = self.args.learning_rate if self.sharded_ddp: lowercase_ = OSS( params=SCREAMING_SNAKE_CASE_ , optim=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) else: lowercase_ = optimizer_cls(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) if self.lr_scheduler is None: lowercase_ = self._get_lr_scheduler(SCREAMING_SNAKE_CASE_ ) else: # ignoring --lr_scheduler logger.warning('''scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.''' ) def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Dict ) -> Dict: lowercase_ = arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": lowercase_ = schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": lowercase_ = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps ) else: lowercase_ = schedule_func( self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=SCREAMING_SNAKE_CASE_ ) return scheduler def _lowercase ( self : Tuple ) -> Optional[torch.utils.data.Sampler]: if isinstance(self.train_dataset , torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[Any] ) -> Any: if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token lowercase_ = model(**SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )[0] lowercase_ = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) ) else: # compute usual loss via models lowercase_ , lowercase_ = model(**SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )[:2] else: # compute label smoothed loss lowercase_ = model(**SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )[0] lowercase_ = torch.nn.functional.log_softmax(SCREAMING_SNAKE_CASE_ , dim=-1 ) lowercase_ , lowercase_ = self.loss_fn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.args.label_smoothing , 
ignore_index=self.config.pad_token_id ) return loss, logits def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> List[Any]: lowercase_ = inputs.pop('''labels''' ) lowercase_ , lowercase_ = self._compute_loss(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return loss def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : nn.Module , SCREAMING_SNAKE_CASE_ : Dict[str, Union[torch.Tensor, Any]] , SCREAMING_SNAKE_CASE_ : bool , SCREAMING_SNAKE_CASE_ : Optional[List[str]] = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: lowercase_ = self._prepare_inputs(SCREAMING_SNAKE_CASE_ ) lowercase_ = { '''max_length''': self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, '''num_beams''': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: lowercase_ = self.model.generate( inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , **SCREAMING_SNAKE_CASE_ , ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: lowercase_ = self._pad_tensors_to_max_len(SCREAMING_SNAKE_CASE_ , gen_kwargs['''max_length'''] ) lowercase_ = inputs.pop('''labels''' ) with torch.no_grad(): # compute loss on predict data lowercase_ , lowercase_ = self._compute_loss(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowercase_ = loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) lowercase_ = generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: lowercase_ = self._pad_tensors_to_max_len(SCREAMING_SNAKE_CASE_ , gen_kwargs['''max_length'''] ) return (loss, logits, labels) def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int ) -> Tuple: # If PAD token is not defined at least EOS token has to be defined lowercase_ = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( '''Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be''' f''' padded to `max_length`={max_length}''' ) lowercase_ = pad_token_id * torch.ones( (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device ) lowercase_ = tensor return padded_tensor
30
from __future__ import annotations from collections.abc import MutableSequence class lowercase__: """simple docstring""" def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : MutableSequence[float] ) -> None: if len(SCREAMING_SNAKE_CASE_ ) != degree + 1: raise ValueError( '''The number of coefficients should be equal to the degree + 1.''' ) lowercase_ = list(SCREAMING_SNAKE_CASE_ ) lowercase_ = degree def __add__( self : Any , SCREAMING_SNAKE_CASE_ : Polynomial ) -> Polynomial: if self.degree > polynomial_a.degree: lowercase_ = self.coefficients[:] for i in range(polynomial_a.degree + 1 ): coefficients[i] += polynomial_a.coefficients[i] return Polynomial(self.degree , SCREAMING_SNAKE_CASE_ ) else: lowercase_ = polynomial_a.coefficients[:] for i in range(self.degree + 1 ): coefficients[i] += self.coefficients[i] return Polynomial(polynomial_a.degree , SCREAMING_SNAKE_CASE_ ) def __sub__( self : str , SCREAMING_SNAKE_CASE_ : Polynomial ) -> Polynomial: return self + polynomial_a * Polynomial(0 , [-1] ) def __neg__( self : int ) -> Polynomial: return Polynomial(self.degree , [-c for c in self.coefficients] ) def __mul__( self : Any , SCREAMING_SNAKE_CASE_ : Polynomial ) -> Polynomial: lowercase_ = [0] * (self.degree + polynomial_a.degree + 1) for i in range(self.degree + 1 ): for j in range(polynomial_a.degree + 1 ): coefficients[i + j] += ( self.coefficients[i] * polynomial_a.coefficients[j] ) return Polynomial(self.degree + polynomial_a.degree , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : int | float ) -> int | float: lowercase_ = 0 for i in range(self.degree + 1 ): result += self.coefficients[i] * (substitution**i) return result def __str__( self : Tuple ) -> str: lowercase_ = '''''' for i in range(self.degree , -1 , -1 ): if self.coefficients[i] == 0: continue elif self.coefficients[i] > 0: if polynomial: polynomial += " + " else: polynomial += " - " if i == 0: polynomial += str(abs(self.coefficients[i] ) ) elif i == 1: polynomial += str(abs(self.coefficients[i] ) ) + "x" else: polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(SCREAMING_SNAKE_CASE_ ) return polynomial def __repr__( self : Optional[Any] ) -> str: return self.__str__() def _lowercase ( self : int ) -> Polynomial: lowercase_ = [0] * self.degree for i in range(self.degree ): lowercase_ = self.coefficients[i + 1] * (i + 1) return Polynomial(self.degree - 1 , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : int | float = 0 ) -> Polynomial: lowercase_ = [0] * (self.degree + 2) lowercase_ = constant for i in range(self.degree + 1 ): lowercase_ = self.coefficients[i] / (i + 1) return Polynomial(self.degree + 1 , SCREAMING_SNAKE_CASE_ ) def __eq__( self : str , SCREAMING_SNAKE_CASE_ : object ) -> bool: if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): return False if self.degree != polynomial_a.degree: return False for i in range(self.degree + 1 ): if self.coefficients[i] != polynomial_a.coefficients[i]: return False return True def __ne__( self : List[str] , SCREAMING_SNAKE_CASE_ : object ) -> bool: return not self.__eq__(SCREAMING_SNAKE_CASE_ )
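A brief usage sketch for the polynomial class above, added here and not part of the original row. It assumes the class is exposed under the name `Polynomial` (the row's class and method names are mangled) with the constructor `Polynomial(degree, coefficients)` where `coefficients[i]` is the coefficient of x**i, and it only exercises the operator overloads:

p = Polynomial(2, [1, 0, 3])  # coefficients for 1 + 0*x + 3*x^2
q = Polynomial(1, [2, 1])     # coefficients for 2 + 1*x

print(p + q)  # sum, printed in descending powers: 3x^2 + 1x + 3
print(p * q)  # product: 3x^3 + 6x^2 + 1x + 2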
30
1
from __future__ import annotations def a ( snake_case__: list[int] , snake_case__: int , snake_case__: int , snake_case__: int ): '''simple docstring''' if (direction == 1 and array[indexa] > array[indexa]) or ( direction == 0 and array[indexa] < array[indexa] ): lowercase_ , lowercase_ = array[indexa], array[indexa] def a ( snake_case__: list[int] , snake_case__: int , snake_case__: int , snake_case__: int ): '''simple docstring''' if length > 1: lowercase_ = int(length / 2 ) for i in range(snake_case__ , low + middle ): comp_and_swap(snake_case__ , snake_case__ , i + middle , snake_case__ ) bitonic_merge(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) bitonic_merge(snake_case__ , low + middle , snake_case__ , snake_case__ ) def a ( snake_case__: list[int] , snake_case__: int , snake_case__: int , snake_case__: int ): '''simple docstring''' if length > 1: lowercase_ = int(length / 2 ) bitonic_sort(snake_case__ , snake_case__ , snake_case__ , 1 ) bitonic_sort(snake_case__ , low + middle , snake_case__ , 0 ) bitonic_merge(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) if __name__ == "__main__": __a = input('Enter numbers separated by a comma:\n').strip() __a = [int(item.strip()) for item in user_input.split(',')] bitonic_sort(unsorted, 0, len(unsorted), 1) print('\nSorted array in ascending order is: ', end='') print(*unsorted, sep=', ') bitonic_merge(unsorted, 0, len(unsorted), 0) print('Sorted array in descending order is: ', end='') print(*unsorted, sep=', ')
30
import itertools
import math


def is_prime(number: int) -> bool:
    """Checks whether ``number`` is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yields the prime numbers in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    """Returns the ``nth`` prime number."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
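A tiny added check for the helpers above (not in the original file), exercising both the primality test and the n-th prime lookup:

assert [is_prime(n) for n in range(2, 12)] == [
    True, True, False, True, False, True, False, False, False, True
]
assert solution(6) == 13  # the sixth prime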
30
1
import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class lowercase__( UpperCAmelCase ): """simple docstring""" a :Optional[Any] = 'char' a :List[str] = 'bpe' a :List[Any] = 'wp' __a = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class lowercase__( UpperCAmelCase ): """simple docstring""" a :List[Any] = ['image_processor', 'char_tokenizer'] a :Optional[int] = 'ViTImageProcessor' a :int = 'MgpstrTokenizer' def __init__( self : int , SCREAMING_SNAKE_CASE_ : Dict=None , SCREAMING_SNAKE_CASE_ : List[Any]=None , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Any: lowercase_ = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , SCREAMING_SNAKE_CASE_ , ) lowercase_ = kwargs.pop('''feature_extractor''' ) lowercase_ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) lowercase_ = tokenizer lowercase_ = AutoTokenizer.from_pretrained('''gpt2''' ) lowercase_ = AutoTokenizer.from_pretrained('''bert-base-uncased''' ) super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def __call__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict=None , SCREAMING_SNAKE_CASE_ : int=None , SCREAMING_SNAKE_CASE_ : List[str]=None , **SCREAMING_SNAKE_CASE_ : Any ) -> List[Any]: if images is None and text is None: raise ValueError('''You need to specify either an `images` or `text` input to process.''' ) if images is not None: lowercase_ = self.image_processor(SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) if text is not None: lowercase_ = self.char_tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) if text is None: return inputs elif images is None: return encodings else: lowercase_ = encodings['''input_ids'''] return inputs def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : str ) -> str: lowercase_ , lowercase_ , lowercase_ = sequences lowercase_ = char_preds.size(0 ) lowercase_ , lowercase_ = self._decode_helper(SCREAMING_SNAKE_CASE_ , '''char''' ) lowercase_ , lowercase_ = self._decode_helper(SCREAMING_SNAKE_CASE_ , '''bpe''' ) lowercase_ , lowercase_ = self._decode_helper(SCREAMING_SNAKE_CASE_ , '''wp''' ) lowercase_ = [] lowercase_ = [] for i in range(SCREAMING_SNAKE_CASE_ ): lowercase_ = [char_scores[i], bpe_scores[i], wp_scores[i]] lowercase_ = [char_strs[i], bpe_strs[i], wp_strs[i]] lowercase_ = scores.index(max(SCREAMING_SNAKE_CASE_ ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) lowercase_ = {} lowercase_ = final_strs lowercase_ = final_scores lowercase_ = char_strs lowercase_ = bpe_strs lowercase_ = wp_strs return out def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple ) -> List[str]: if format == DecodeType.CHARACTER: lowercase_ = self.char_decode lowercase_ = 1 lowercase_ = '''[s]''' elif format == DecodeType.BPE: lowercase_ = self.bpe_decode lowercase_ = 2 lowercase_ = '''#''' elif format == DecodeType.WORDPIECE: lowercase_ = self.wp_decode lowercase_ = 1_0_2 lowercase_ = '''[SEP]''' 
else: raise ValueError(f'''Format {format} is not supported.''' ) lowercase_ , lowercase_ = [], [] lowercase_ = pred_logits.size(0 ) lowercase_ = pred_logits.size(1 ) lowercase_ , lowercase_ = pred_logits.topk(1 , dim=-1 , largest=SCREAMING_SNAKE_CASE_ , sorted=SCREAMING_SNAKE_CASE_ ) lowercase_ = preds_index.view(-1 , SCREAMING_SNAKE_CASE_ )[:, 1:] lowercase_ = decoder(SCREAMING_SNAKE_CASE_ ) lowercase_ , lowercase_ = torch.nn.functional.softmax(SCREAMING_SNAKE_CASE_ , dim=2 ).max(dim=2 ) lowercase_ = preds_max_prob[:, 1:] for index in range(SCREAMING_SNAKE_CASE_ ): lowercase_ = preds_str[index].find(SCREAMING_SNAKE_CASE_ ) lowercase_ = preds_str[index][:pred_eos] lowercase_ = preds_index[index].cpu().tolist() lowercase_ = pred_index.index(SCREAMING_SNAKE_CASE_ ) if eos_token in pred_index else -1 lowercase_ = preds_max_prob[index][: pred_eos_index + 1] lowercase_ = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(SCREAMING_SNAKE_CASE_ ) conf_scores.append(SCREAMING_SNAKE_CASE_ ) return dec_strs, conf_scores def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Any ) -> Union[str, Any]: lowercase_ = [seq.replace(''' ''' , '''''' ) for seq in self.char_tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )] return decode_strs def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> str: return self.bpe_tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : Tuple ) -> Union[str, Any]: lowercase_ = [seq.replace(''' ''' , '''''' ) for seq in self.wp_tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )] return decode_strs
30
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
30
1
import os

# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 1_0_1)]


def solution():
    """Counts the triangular words in words.txt (a word is triangular when the
    sum of its letter values, A=1 ... Z=26, is a triangular number)."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)


if __name__ == "__main__":
    print(solution())
30
from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS __a = logging.get_logger(__name__) __a = { 'linear': get_linear_schedule_with_warmup, 'cosine': get_cosine_schedule_with_warmup, 'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup, 'polynomial': get_polynomial_decay_schedule_with_warmup, 'constant': get_constant_schedule, 'constant_w_warmup': get_constant_schedule_with_warmup, } class lowercase__( UpperCAmelCase ): """simple docstring""" def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> List[str]: super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) if config is None: assert isinstance(self.model , SCREAMING_SNAKE_CASE_ ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" f''' {self.model.__class__}''' ) lowercase_ = self.model.config else: lowercase_ = config lowercase_ = data_args lowercase_ = self.config.tgt_vocab_size if isinstance(self.config , SCREAMING_SNAKE_CASE_ ) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( f'''The `config.pad_token_id` is `None`. 
Using `config.eos_token_id` = {self.config.eos_token_id} for''' ''' padding..''' ) if self.args.label_smoothing == 0: lowercase_ = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss lowercase_ = label_smoothed_nll_loss def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : int ) -> Optional[Any]: if self.optimizer is None: lowercase_ = ['''bias''', '''LayerNorm.weight'''] lowercase_ = [ { '''params''': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], '''weight_decay''': self.args.weight_decay, }, { '''params''': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0, }, ] lowercase_ = Adafactor if self.args.adafactor else AdamW if self.args.adafactor: lowercase_ = Adafactor lowercase_ = {'''scale_parameter''': False, '''relative_step''': False} else: lowercase_ = AdamW lowercase_ = { '''betas''': (self.args.adam_betaa, self.args.adam_betaa), '''eps''': self.args.adam_epsilon, } lowercase_ = self.args.learning_rate if self.sharded_ddp: lowercase_ = OSS( params=SCREAMING_SNAKE_CASE_ , optim=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) else: lowercase_ = optimizer_cls(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) if self.lr_scheduler is None: lowercase_ = self._get_lr_scheduler(SCREAMING_SNAKE_CASE_ ) else: # ignoring --lr_scheduler logger.warning('''scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.''' ) def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Dict ) -> Dict: lowercase_ = arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": lowercase_ = schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": lowercase_ = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps ) else: lowercase_ = schedule_func( self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=SCREAMING_SNAKE_CASE_ ) return scheduler def _lowercase ( self : Tuple ) -> Optional[torch.utils.data.Sampler]: if isinstance(self.train_dataset , torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[Any] ) -> Any: if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token lowercase_ = model(**SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )[0] lowercase_ = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) ) else: # compute usual loss via models lowercase_ , lowercase_ = model(**SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )[:2] else: # compute label smoothed loss lowercase_ = model(**SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )[0] lowercase_ = torch.nn.functional.log_softmax(SCREAMING_SNAKE_CASE_ , dim=-1 ) lowercase_ , lowercase_ = self.loss_fn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.args.label_smoothing , 
ignore_index=self.config.pad_token_id ) return loss, logits def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> List[Any]: lowercase_ = inputs.pop('''labels''' ) lowercase_ , lowercase_ = self._compute_loss(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return loss def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : nn.Module , SCREAMING_SNAKE_CASE_ : Dict[str, Union[torch.Tensor, Any]] , SCREAMING_SNAKE_CASE_ : bool , SCREAMING_SNAKE_CASE_ : Optional[List[str]] = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: lowercase_ = self._prepare_inputs(SCREAMING_SNAKE_CASE_ ) lowercase_ = { '''max_length''': self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, '''num_beams''': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: lowercase_ = self.model.generate( inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , **SCREAMING_SNAKE_CASE_ , ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: lowercase_ = self._pad_tensors_to_max_len(SCREAMING_SNAKE_CASE_ , gen_kwargs['''max_length'''] ) lowercase_ = inputs.pop('''labels''' ) with torch.no_grad(): # compute loss on predict data lowercase_ , lowercase_ = self._compute_loss(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowercase_ = loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) lowercase_ = generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: lowercase_ = self._pad_tensors_to_max_len(SCREAMING_SNAKE_CASE_ , gen_kwargs['''max_length'''] ) return (loss, logits, labels) def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int ) -> Tuple: # If PAD token is not defined at least EOS token has to be defined lowercase_ = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( '''Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be''' f''' padded to `max_length`={max_length}''' ) lowercase_ = pad_token_id * torch.ones( (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device ) lowercase_ = tensor return padded_tensor
30
1
import logging from dataclasses import dataclass, field from pathlib import Path from typing import Optional, Union from .generation.configuration_utils import GenerationConfig from .training_args import TrainingArguments from .utils import add_start_docstrings __a = logging.getLogger(__name__) @dataclass @add_start_docstrings(TrainingArguments.__doc__ ) class lowercase__( UpperCAmelCase ): """simple docstring""" a :bool = field(default=UpperCAmelCase , metadata={'help': 'Whether to use SortishSampler or not.'} ) a :bool = field( default=UpperCAmelCase , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} ) a :Optional[int] = field( default=UpperCAmelCase , metadata={ 'help': ( 'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default ' 'to the `max_length` value of the model configuration.' ) } , ) a :Optional[int] = field( default=UpperCAmelCase , metadata={ 'help': ( 'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default ' 'to the `num_beams` value of the model configuration.' ) } , ) a :Optional[Union[str, Path, GenerationConfig]] = field( default=UpperCAmelCase , metadata={ 'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.' } , ) def _lowercase ( self : Dict ) -> List[Any]: lowercase_ = super().to_dict() for k, v in d.items(): if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase_ = v.to_dict() return d
30
import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class lowercase__( unittest.TestCase ): """simple docstring""" def _lowercase ( self : List[str] ) -> List[Any]: lowercase_ = 1_0 def _lowercase ( self : int ) -> List[str]: lowercase_ = [1, 2, 3, 4] lowercase_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(SCREAMING_SNAKE_CASE_ , self.block_size , 0 ) , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : int ) -> Optional[Any]: lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] self.assertEqual(truncate_or_pad(SCREAMING_SNAKE_CASE_ , self.block_size , 0 ) , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Union[str, Any] ) -> Optional[int]: lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0, 1_1, 1_2, 1_3] lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] self.assertEqual(truncate_or_pad(SCREAMING_SNAKE_CASE_ , self.block_size , 0 ) , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Any ) -> List[Any]: lowercase_ = '''It was the year of Our Lord one thousand seven hundred and seventy-five.\n\nSpiritual revelations were conceded to England at that favoured period, as at this.''' lowercase_ , lowercase_ = process_story(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , [] ) def _lowercase ( self : List[str] ) -> List[str]: lowercase_ = '''''' lowercase_ , lowercase_ = process_story(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , [] ) self.assertEqual(SCREAMING_SNAKE_CASE_ , [] ) def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]: lowercase_ = ( '''It was the year of Our Lord one thousand seven hundred and ''' '''seventy-five\n\nSpiritual revelations were conceded to England ''' '''at that favoured period, as at this.\n@highlight\n\nIt was the best of times''' ) lowercase_ , lowercase_ = process_story(SCREAMING_SNAKE_CASE_ ) lowercase_ = [ '''It was the year of Our Lord one thousand seven hundred and seventy-five.''', '''Spiritual revelations were conceded to England at that favoured period, as at this.''', ] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowercase_ = ['''It was the best of times.'''] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Union[str, Any] ) -> Optional[Any]: lowercase_ = torch.tensor([1, 2, 3, 4] ) lowercase_ = torch.tensor([1, 1, 1, 1] ) np.testing.assert_array_equal(build_mask(SCREAMING_SNAKE_CASE_ , 0 ).numpy() , expected.numpy() ) def _lowercase ( self : List[Any] ) -> Tuple: lowercase_ = torch.tensor([1, 2, 3, 4, 2_3, 2_3, 2_3] ) lowercase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(SCREAMING_SNAKE_CASE_ , 2_3 ).numpy() , expected.numpy() ) def _lowercase ( self : int ) -> Dict: lowercase_ = torch.tensor([8, 2, 3, 4, 1, 1, 1] ) lowercase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(SCREAMING_SNAKE_CASE_ , 1 ).numpy() , expected.numpy() ) def _lowercase ( self : List[str] ) -> Tuple: lowercase_ = 1_0_1 lowercase_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_0_1, 5, 6], [1, 1_0_1, 3, 4, 1_0_1, 6]] ) lowercase_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] ) lowercase_ = compute_token_type_ids(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) np.testing.assert_array_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
30
1
def a ( snake_case__: int , snake_case__: list ): '''simple docstring''' _enforce_args(snake_case__ , snake_case__ ) if n == 0: return 0 lowercase_ = float('''-inf''' ) for i in range(1 , n + 1 ): lowercase_ = max( snake_case__ , prices[i - 1] + naive_cut_rod_recursive(n - i , snake_case__ ) ) return max_revue def a ( snake_case__: int , snake_case__: list ): '''simple docstring''' _enforce_args(snake_case__ , snake_case__ ) lowercase_ = [float('''-inf''' ) for _ in range(n + 1 )] return _top_down_cut_rod_recursive(snake_case__ , snake_case__ , snake_case__ ) def a ( snake_case__: int , snake_case__: list , snake_case__: list ): '''simple docstring''' if max_rev[n] >= 0: return max_rev[n] elif n == 0: return 0 else: lowercase_ = float('''-inf''' ) for i in range(1 , n + 1 ): lowercase_ = max( snake_case__ , prices[i - 1] + _top_down_cut_rod_recursive(n - i , snake_case__ , snake_case__ ) , ) lowercase_ = max_revenue return max_rev[n] def a ( snake_case__: int , snake_case__: list ): '''simple docstring''' _enforce_args(snake_case__ , snake_case__ ) # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of # length 0. lowercase_ = [float('''-inf''' ) for _ in range(n + 1 )] lowercase_ = 0 for i in range(1 , n + 1 ): lowercase_ = max_rev[i] for j in range(1 , i + 1 ): lowercase_ = max(snake_case__ , prices[j - 1] + max_rev[i - j] ) lowercase_ = max_revenue_i return max_rev[n] def a ( snake_case__: int , snake_case__: list ): '''simple docstring''' if n < 0: lowercase_ = F'''n must be greater than or equal to 0. Got n = {n}''' raise ValueError(snake_case__ ) if n > len(snake_case__ ): lowercase_ = ( '''Each integral piece of rod must have a corresponding price. ''' F'''Got n = {n} but length of prices = {len(snake_case__ )}''' ) raise ValueError(snake_case__ ) def a ( ): '''simple docstring''' lowercase_ = [6, 10, 12, 15, 20, 23] lowercase_ = len(snake_case__ ) # the best revenue comes from cutting the rod into 6 pieces, each # of length 1 resulting in a revenue of 6 * 6 = 36. lowercase_ = 36 lowercase_ = top_down_cut_rod(snake_case__ , snake_case__ ) lowercase_ = bottom_up_cut_rod(snake_case__ , snake_case__ ) lowercase_ = naive_cut_rod_recursive(snake_case__ , snake_case__ ) assert expected_max_revenue == max_rev_top_down assert max_rev_top_down == max_rev_bottom_up assert max_rev_bottom_up == max_rev_naive if __name__ == "__main__": main()
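The top-down recurrence in the rod-cutting file above can also be written compactly with `functools.lru_cache`. The sketch below is an independent formulation with its own (hypothetical) names, not the file's API; it reuses the same example prices and expected revenue as the file's `main`.

from functools import lru_cache


def max_revenue(n, prices):
    # Memoised form of the same recurrence:
    # best(length) = max over first cut i of prices[i - 1] + best(length - i).
    price = tuple(prices)

    @lru_cache(maxsize=None)
    def best(length):
        if length == 0:
            return 0
        return max(price[i - 1] + best(length - i) for i in range(1, length + 1))

    return best(n)


assert max_revenue(6, [6, 10, 12, 15, 20, 23]) == 36  # same example as above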
30
def heaps(arr: list) -> list:
    """Returns all permutations of ``arr`` using Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
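A small sanity check added after the fix above (the helper name is ours): for a short input, Heap's algorithm must produce exactly the permutations that `itertools.permutations` produces.

import itertools


def matches_itertools(values):
    # Compare the permutations produced by heaps() with itertools.permutations.
    produced = heaps(list(values))  # pass a copy: heaps() mutates its input
    expected = list(itertools.permutations(values))
    return len(produced) == len(expected) and sorted(produced) == sorted(expected)


# matches_itertools([1, 2, 3]) is expected to return True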
30
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __a = { 'configuration_conditional_detr': [ 'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConditionalDetrConfig', 'ConditionalDetrOnnxConfig', ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['ConditionalDetrFeatureExtractor'] __a = ['ConditionalDetrImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST', 'ConditionalDetrForObjectDetection', 'ConditionalDetrForSegmentation', 'ConditionalDetrModel', 'ConditionalDetrPreTrainedModel', ] if TYPE_CHECKING: from .configuration_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, ConditionalDetrConfig, ConditionalDetrOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor from .image_processing_conditional_detr import ConditionalDetrImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrModel, ConditionalDetrPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
30
import argparse import math import os import torch from neural_compressor.utils.pytorch import load from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel def a ( ): '''simple docstring''' lowercase_ = argparse.ArgumentParser() parser.add_argument( '''-m''' , '''--pretrained_model_name_or_path''' , type=snake_case__ , default=snake_case__ , required=snake_case__ , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , ) parser.add_argument( '''-c''' , '''--caption''' , type=snake_case__ , default='''robotic cat with wings''' , help='''Text used to generate images.''' , ) parser.add_argument( '''-n''' , '''--images_num''' , type=snake_case__ , default=4 , help='''How much images to generate.''' , ) parser.add_argument( '''-s''' , '''--seed''' , type=snake_case__ , default=42 , help='''Seed for random process.''' , ) parser.add_argument( '''-ci''' , '''--cuda_id''' , type=snake_case__ , default=0 , help='''cuda_id.''' , ) lowercase_ = parser.parse_args() return args def a ( snake_case__: Optional[Any] , snake_case__: Tuple , snake_case__: Union[str, Any] ): '''simple docstring''' if not len(snake_case__ ) == rows * cols: raise ValueError('''The specified number of rows and columns are not correct.''' ) lowercase_ , lowercase_ = imgs[0].size lowercase_ = Image.new('''RGB''' , size=(cols * w, rows * h) ) lowercase_ , lowercase_ = grid.size for i, img in enumerate(snake_case__ ): grid.paste(snake_case__ , box=(i % cols * w, i // cols * h) ) return grid def a ( snake_case__: Tuple , snake_case__: Union[str, Any]="robotic cat with wings" , snake_case__: Union[str, Any]=7.5 , snake_case__: List[str]=50 , snake_case__: List[Any]=1 , snake_case__: Optional[int]=42 , ): '''simple docstring''' lowercase_ = torch.Generator(pipeline.device ).manual_seed(snake_case__ ) lowercase_ = pipeline( snake_case__ , guidance_scale=snake_case__ , num_inference_steps=snake_case__ , generator=snake_case__ , num_images_per_prompt=snake_case__ , ).images lowercase_ = int(math.sqrt(snake_case__ ) ) lowercase_ = image_grid(snake_case__ , rows=_rows , cols=num_images_per_prompt // _rows ) return grid, images __a = parse_args() # Load models and create wrapper for stable diffusion __a = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer') __a = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder') __a = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae') __a = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet') __a = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer ) __a = lambda images, clip_input: (images, False) if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')): __a = load(args.pretrained_model_name_or_path, model=unet) unet.eval() setattr(pipeline, 'unet', unet) else: __a = unet.to(torch.device('cuda', args.cuda_id)) __a = pipeline.to(unet.device) __a , __a = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed) grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split())))) __a = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split())) os.makedirs(dirname, exist_ok=True) for idx, image in 
enumerate(images): image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
30
1
def a(txt: str):
    """Returns every variant of ``txt`` in which one alphabetic character has
    been upper-cased."""
    return [
        txt[:i] + txt[i].upper() + txt[i + 1 :]
        for i in range(len(txt))
        if txt[i].isalpha()
    ]


if __name__ == "__main__":
    __import__("doctest").testmod()
30
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __a = { 'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig'] } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['RemBertTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['RemBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'RemBertForCausalLM', 'RemBertForMaskedLM', 'RemBertForMultipleChoice', 'RemBertForQuestionAnswering', 'RemBertForSequenceClassification', 'RemBertForTokenClassification', 'RemBertLayer', 'RemBertModel', 'RemBertPreTrainedModel', 'load_tf_weights_in_rembert', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFRemBertForCausalLM', 'TFRemBertForMaskedLM', 'TFRemBertForMultipleChoice', 'TFRemBertForQuestionAnswering', 'TFRemBertForSequenceClassification', 'TFRemBertForTokenClassification', 'TFRemBertLayer', 'TFRemBertModel', 'TFRemBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_rembert import RemBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_rembert_fast import RemBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rembert import ( REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, RemBertForCausalLM, RemBertForMaskedLM, RemBertForMultipleChoice, RemBertForQuestionAnswering, RemBertForSequenceClassification, RemBertForTokenClassification, RemBertLayer, RemBertModel, RemBertPreTrainedModel, load_tf_weights_in_rembert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rembert import ( TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFRemBertForCausalLM, TFRemBertForMaskedLM, TFRemBertForMultipleChoice, TFRemBertForQuestionAnswering, TFRemBertForSequenceClassification, TFRemBertForTokenClassification, TFRemBertLayer, TFRemBertModel, TFRemBertPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
30
1
from transformers import BertTokenizerFast

from .custom_tokenization import CustomTokenizer


class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
30
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __a = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['DeiTFeatureExtractor'] __a = ['DeiTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'DeiTForImageClassification', 'DeiTForImageClassificationWithTeacher', 'DeiTForMaskedImageModeling', 'DeiTModel', 'DeiTPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFDeiTForImageClassification', 'TFDeiTForImageClassificationWithTeacher', 'TFDeiTForMaskedImageModeling', 'TFDeiTModel', 'TFDeiTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_deit import DeiTFeatureExtractor from .image_processing_deit import DeiTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deit import ( DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, DeiTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deit import ( TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, TFDeiTPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
30
1
import unittest import numpy as np from transformers import AlbertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.albert.modeling_flax_albert import ( FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, ) class lowercase__( unittest.TestCase ): """simple docstring""" def __init__( self : str , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict=1_3 , SCREAMING_SNAKE_CASE_ : List[Any]=7 , SCREAMING_SNAKE_CASE_ : Any=True , SCREAMING_SNAKE_CASE_ : Optional[int]=True , SCREAMING_SNAKE_CASE_ : Any=True , SCREAMING_SNAKE_CASE_ : Optional[int]=True , SCREAMING_SNAKE_CASE_ : List[Any]=9_9 , SCREAMING_SNAKE_CASE_ : Tuple=3_2 , SCREAMING_SNAKE_CASE_ : int=5 , SCREAMING_SNAKE_CASE_ : Dict=4 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=3_7 , SCREAMING_SNAKE_CASE_ : Tuple="gelu" , SCREAMING_SNAKE_CASE_ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE_ : int=0.1 , SCREAMING_SNAKE_CASE_ : Tuple=5_1_2 , SCREAMING_SNAKE_CASE_ : List[str]=1_6 , SCREAMING_SNAKE_CASE_ : Optional[Any]=2 , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.02 , SCREAMING_SNAKE_CASE_ : Tuple=4 , ) -> Dict: lowercase_ = parent lowercase_ = batch_size lowercase_ = seq_length lowercase_ = is_training lowercase_ = use_attention_mask lowercase_ = use_token_type_ids lowercase_ = use_labels lowercase_ = vocab_size lowercase_ = hidden_size lowercase_ = num_hidden_layers lowercase_ = num_attention_heads lowercase_ = intermediate_size lowercase_ = hidden_act lowercase_ = hidden_dropout_prob lowercase_ = attention_probs_dropout_prob lowercase_ = max_position_embeddings lowercase_ = type_vocab_size lowercase_ = type_sequence_label_size lowercase_ = initializer_range lowercase_ = num_choices def _lowercase ( self : Optional[int] ) -> Union[str, Any]: lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase_ = None if self.use_attention_mask: lowercase_ = random_attention_mask([self.batch_size, self.seq_length] ) lowercase_ = None if self.use_token_type_ids: lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase_ = AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _lowercase ( self : Optional[Any] ) -> List[str]: lowercase_ = self.prepare_config_and_inputs() lowercase_ , lowercase_ , lowercase_ , lowercase_ = config_and_inputs lowercase_ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict @require_flax class lowercase__( UpperCAmelCase , unittest.TestCase ): """simple docstring""" a :Optional[Any] = ( ( FlaxAlbertModel, FlaxAlbertForPreTraining, FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, 
FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertForQuestionAnswering, ) if is_flax_available() else () ) def _lowercase ( self : Tuple ) -> str: lowercase_ = FlaxAlbertModelTester(self ) @slow def _lowercase ( self : List[Any] ) -> int: for model_class_name in self.all_model_classes: lowercase_ = model_class_name.from_pretrained('''albert-base-v2''' ) lowercase_ = model(np.ones((1, 1) ) ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) @require_flax class lowercase__( unittest.TestCase ): """simple docstring""" @slow def _lowercase ( self : Any ) -> Dict: lowercase_ = FlaxAlbertModel.from_pretrained('''albert-base-v2''' ) lowercase_ = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) lowercase_ = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) lowercase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )[0] lowercase_ = (1, 1_1, 7_6_8) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ ) lowercase_ = np.array( [[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
30
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) __a = { 'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'], 'processing_trocr': ['TrOCRProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST', 'TrOCRForCausalLM', 'TrOCRPreTrainedModel', ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
30
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available, is_vision_available, ) __a = {'configuration_beit': ['BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BeitConfig', 'BeitOnnxConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['BeitFeatureExtractor'] __a = ['BeitImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'BEIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'BeitForImageClassification', 'BeitForMaskedImageModeling', 'BeitForSemanticSegmentation', 'BeitModel', 'BeitPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'FlaxBeitForImageClassification', 'FlaxBeitForMaskedImageModeling', 'FlaxBeitModel', 'FlaxBeitPreTrainedModel', ] if TYPE_CHECKING: from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_beit import BeitFeatureExtractor from .image_processing_beit import BeitImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_beit import ( BEIT_PRETRAINED_MODEL_ARCHIVE_LIST, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, BeitPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_beit import ( FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel, FlaxBeitPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
30
from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig __a = logging.get_logger(__name__) # General docstring __a = 'RegNetConfig' # Base docstring __a = 'facebook/regnet-y-040' __a = [1, 1_0_8_8, 7, 7] # Image classification docstring __a = 'facebook/regnet-y-040' __a = 'tabby, tabby cat' __a = [ 'facebook/regnet-y-040', # See all regnet models at https://huggingface.co/models?filter=regnet ] class lowercase__( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 3 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : Optional[str] = "relu" , **SCREAMING_SNAKE_CASE_ : Any , ) -> List[str]: super().__init__(**SCREAMING_SNAKE_CASE_ ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb lowercase_ = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) lowercase_ = tf.keras.layers.ConvaD( filters=SCREAMING_SNAKE_CASE_ , kernel_size=SCREAMING_SNAKE_CASE_ , strides=SCREAMING_SNAKE_CASE_ , padding='''VALID''' , groups=SCREAMING_SNAKE_CASE_ , use_bias=SCREAMING_SNAKE_CASE_ , name='''convolution''' , ) lowercase_ = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' ) lowercase_ = ACTaFN[activation] if activation is not None else tf.identity def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any: lowercase_ = self.convolution(self.padding(SCREAMING_SNAKE_CASE_ ) ) lowercase_ = self.normalization(SCREAMING_SNAKE_CASE_ ) lowercase_ = self.activation(SCREAMING_SNAKE_CASE_ ) return hidden_state class lowercase__( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : str , SCREAMING_SNAKE_CASE_ : RegNetConfig , **SCREAMING_SNAKE_CASE_ : str ) -> Any: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = config.num_channels lowercase_ = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , ) def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : str ) -> Optional[int]: lowercase_ = shape_list(SCREAMING_SNAKE_CASE_ )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) lowercase_ = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 2, 3, 1) ) lowercase_ = self.embedder(SCREAMING_SNAKE_CASE_ ) return hidden_state class lowercase__( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 2 , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Union[str, Any]: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = tf.keras.layers.ConvaD( filters=SCREAMING_SNAKE_CASE_ , kernel_size=1 , strides=SCREAMING_SNAKE_CASE_ , use_bias=SCREAMING_SNAKE_CASE_ , name='''convolution''' ) lowercase_ = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' ) def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : bool = False ) -> tf.Tensor: return self.normalization(self.convolution(SCREAMING_SNAKE_CASE_ ) , training=SCREAMING_SNAKE_CASE_ ) class lowercase__( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : int ) -> Optional[Any]: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=SCREAMING_SNAKE_CASE_ , name='''pooler''' ) lowercase_ = [ tf.keras.layers.ConvaD(filters=SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation='''relu''' , name='''attention.0''' ), tf.keras.layers.ConvaD(filters=SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ), ] def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Dict: # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels] lowercase_ = self.pooler(SCREAMING_SNAKE_CASE_ ) for layer_module in self.attention: lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ ) lowercase_ = hidden_state * pooled return hidden_state class lowercase__( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : str , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 1 , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> List[str]: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = in_channels != out_channels or stride != 1 lowercase_ = max(1 , out_channels // config.groups_width ) lowercase_ = ( TFRegNetShortCut(SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name='''shortcut''' ) if should_apply_shortcut else tf.keras.layers.Activation('''linear''' , name='''shortcut''' ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
lowercase_ = [ TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ), TFRegNetConvLayer( SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , groups=SCREAMING_SNAKE_CASE_ , activation=config.hidden_act , name='''layer.1''' ), TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE_ , name='''layer.2''' ), ] lowercase_ = ACTaFN[config.hidden_act] def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any: lowercase_ = hidden_state for layer_module in self.layers: lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ ) lowercase_ = self.shortcut(SCREAMING_SNAKE_CASE_ ) hidden_state += residual lowercase_ = self.activation(SCREAMING_SNAKE_CASE_ ) return hidden_state class lowercase__( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : Any , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 1 , **SCREAMING_SNAKE_CASE_ : List[str] ) -> int: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = in_channels != out_channels or stride != 1 lowercase_ = max(1 , out_channels // config.groups_width ) lowercase_ = ( TFRegNetShortCut(SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name='''shortcut''' ) if should_apply_shortcut else tf.keras.layers.Activation('''linear''' , name='''shortcut''' ) ) lowercase_ = [ TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ), TFRegNetConvLayer( SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , groups=SCREAMING_SNAKE_CASE_ , activation=config.hidden_act , name='''layer.1''' ), TFRegNetSELayer(SCREAMING_SNAKE_CASE_ , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ), TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE_ , name='''layer.3''' ), ] lowercase_ = ACTaFN[config.hidden_act] def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : Dict ) -> Optional[Any]: lowercase_ = hidden_state for layer_module in self.layers: lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ ) lowercase_ = self.shortcut(SCREAMING_SNAKE_CASE_ ) hidden_state += residual lowercase_ = self.activation(SCREAMING_SNAKE_CASE_ ) return hidden_state class lowercase__( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : str , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 2 , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> List[str]: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer lowercase_ = [ # downsampling is done in the first layer with stride of 2 layer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name='''layers.0''' ), *[layer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , name=f'''layers.{i+1}''' ) for i in range(depth - 1 )], ] def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : int ) -> int: for layer_module in self.layers: lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ ) return hidden_state class lowercase__( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : RegNetConfig , **SCREAMING_SNAKE_CASE_ : Dict ) -> Tuple: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = [] # based on `downsample_in_first_stage`, the 
first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( SCREAMING_SNAKE_CASE_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) ) lowercase_ = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(SCREAMING_SNAKE_CASE_ , config.depths[1:] ) ): self.stages.append(TFRegNetStage(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , depth=SCREAMING_SNAKE_CASE_ , name=f'''stages.{i+1}''' ) ) def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : bool = True ) -> TFBaseModelOutputWithNoAttention: lowercase_ = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: lowercase_ = hidden_states + (hidden_state,) lowercase_ = stage_module(SCREAMING_SNAKE_CASE_ ) if output_hidden_states: lowercase_ = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=SCREAMING_SNAKE_CASE_ , hidden_states=SCREAMING_SNAKE_CASE_ ) @keras_serializable class lowercase__( tf.keras.layers.Layer ): """simple docstring""" a :str = RegNetConfig def __init__( self : str , SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Any: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = config lowercase_ = TFRegNetEmbeddings(SCREAMING_SNAKE_CASE_ , name='''embedder''' ) lowercase_ = TFRegNetEncoder(SCREAMING_SNAKE_CASE_ , name='''encoder''' ) lowercase_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=SCREAMING_SNAKE_CASE_ , name='''pooler''' ) @unpack_inputs def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : bool = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention: lowercase_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict lowercase_ = self.embedder(SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ ) lowercase_ = self.encoder( SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ ) lowercase_ = encoder_outputs[0] lowercase_ = self.pooler(SCREAMING_SNAKE_CASE_ ) # Change to NCHW output format have uniformity in the modules lowercase_ = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2) ) lowercase_ = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: lowercase_ = tuple([tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=SCREAMING_SNAKE_CASE_ , pooler_output=SCREAMING_SNAKE_CASE_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class lowercase__( UpperCAmelCase ): """simple docstring""" a :Tuple = RegNetConfig a :Any = 'regnet' a :List[str] = 'pixel_values' @property def _lowercase ( self : List[str] ) -> str: return {"pixel_values": tf.TensorSpec(shape=(None, 
self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )} __a = r'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n' __a = r'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( 'The bare RegNet model outputting raw features without any specific head on top.' , UpperCAmelCase , ) class lowercase__( UpperCAmelCase ): """simple docstring""" def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : RegNetConfig , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : str ) -> List[str]: super().__init__(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) lowercase_ = TFRegNetMainLayer(SCREAMING_SNAKE_CASE_ , name='''regnet''' ) @unpack_inputs @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[Any]=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]: lowercase_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict lowercase_ = self.regnet( pixel_values=SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( '\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n ' , UpperCAmelCase , ) class lowercase__( UpperCAmelCase , UpperCAmelCase ): """simple docstring""" def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : RegNetConfig , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : int ) -> Union[str, Any]: super().__init__(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) lowercase_ = config.num_labels lowercase_ = TFRegNetMainLayer(SCREAMING_SNAKE_CASE_ , name='''regnet''' ) # classification head lowercase_ = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : tf.Tensor = None , SCREAMING_SNAKE_CASE_ : tf.Tensor = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : Dict=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: lowercase_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict lowercase_ = self.regnet( SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ ) lowercase_ = outputs.pooler_output if return_dict else outputs[1] lowercase_ = self.classifier[0](SCREAMING_SNAKE_CASE_ ) lowercase_ = self.classifier[1](SCREAMING_SNAKE_CASE_ ) lowercase_ = None if labels is None else self.hf_compute_loss(labels=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_ ) if not return_dict: lowercase_ = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_ , hidden_states=outputs.hidden_states )
30
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __a = { 'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'], 'tokenization_roformer': ['RoFormerTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['RoFormerTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'RoFormerForCausalLM', 'RoFormerForMaskedLM', 'RoFormerForMultipleChoice', 'RoFormerForQuestionAnswering', 'RoFormerForSequenceClassification', 'RoFormerForTokenClassification', 'RoFormerLayer', 'RoFormerModel', 'RoFormerPreTrainedModel', 'load_tf_weights_in_roformer', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFRoFormerForCausalLM', 'TFRoFormerForMaskedLM', 'TFRoFormerForMultipleChoice', 'TFRoFormerForQuestionAnswering', 'TFRoFormerForSequenceClassification', 'TFRoFormerForTokenClassification', 'TFRoFormerLayer', 'TFRoFormerModel', 'TFRoFormerPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'FlaxRoFormerForMaskedLM', 'FlaxRoFormerForMultipleChoice', 'FlaxRoFormerForQuestionAnswering', 'FlaxRoFormerForSequenceClassification', 'FlaxRoFormerForTokenClassification', 'FlaxRoFormerModel', 'FlaxRoFormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig from .tokenization_roformer import RoFormerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roformer_fast import RoFormerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roformer import ( ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerLayer, RoFormerModel, RoFormerPreTrainedModel, load_tf_weights_in_roformer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roformer import ( TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerLayer, TFRoFormerModel, TFRoFormerPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roformer import ( FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, FlaxRoFormerPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['__file__'], 
_import_structure, module_spec=__spec__)
30
import importlib.metadata import warnings from copy import deepcopy from packaging import version from ..utils import logging from .import_utils import is_accelerate_available, is_bitsandbytes_available if is_bitsandbytes_available(): import bitsandbytes as bnb import torch import torch.nn as nn from ..pytorch_utils import ConvaD if is_accelerate_available(): from accelerate import init_empty_weights from accelerate.utils import find_tied_parameters __a = logging.get_logger(__name__) def a ( snake_case__: Optional[int] , snake_case__: Dict , snake_case__: int , snake_case__: List[str]=None , snake_case__: List[Any]=None ): '''simple docstring''' # Recurse if needed if "." in tensor_name: lowercase_ = tensor_name.split('''.''' ) for split in splits[:-1]: lowercase_ = getattr(snake_case__ , snake_case__ ) if new_module is None: raise ValueError(F'''{module} has no attribute {split}.''' ) lowercase_ = new_module lowercase_ = splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(F'''{module} does not have a parameter or a buffer named {tensor_name}.''' ) lowercase_ = tensor_name in module._buffers lowercase_ = getattr(snake_case__ , snake_case__ ) if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None: raise ValueError(F'''{tensor_name} is on the meta device, we need a `value` to put in on {device}.''' ) lowercase_ = False lowercase_ = False if is_buffer or not is_bitsandbytes_available(): lowercase_ = False lowercase_ = False else: lowercase_ = hasattr(bnb.nn , '''Params4bit''' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit ) lowercase_ = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams ) if is_abit or is_abit: lowercase_ = module._parameters[tensor_name] if param.device.type != "cuda": if value is None: lowercase_ = old_value.to(snake_case__ ) elif isinstance(snake_case__ , torch.Tensor ): lowercase_ = value.to('''cpu''' ) if value.dtype == torch.inta: lowercase_ = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse( '''0.37.2''' ) if not is_abit_serializable: raise ValueError( '''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. ''' '''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' ) else: lowercase_ = torch.tensor(snake_case__ , device='''cpu''' ) # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. 
if issubclass(module.source_cls , snake_case__ ) and fpaa_statistics is None: lowercase_ = new_value.T lowercase_ = old_value.__dict__ if is_abit: lowercase_ = bnb.nn.IntaParams(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ ) elif is_abit: lowercase_ = bnb.nn.Paramsabit(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ ) lowercase_ = new_value if fpaa_statistics is not None: setattr(module.weight , '''SCB''' , fpaa_statistics.to(snake_case__ ) ) else: if value is None: lowercase_ = old_value.to(snake_case__ ) elif isinstance(snake_case__ , torch.Tensor ): lowercase_ = value.to(snake_case__ ) else: lowercase_ = torch.tensor(snake_case__ , device=snake_case__ ) if is_buffer: lowercase_ = new_value else: lowercase_ = nn.Parameter(snake_case__ , requires_grad=old_value.requires_grad ) lowercase_ = new_value def a ( snake_case__: str , snake_case__: Union[str, Any]=None , snake_case__: Any=None , snake_case__: List[str]=None , snake_case__: Optional[Any]=False ): '''simple docstring''' for name, module in model.named_children(): if current_key_name is None: lowercase_ = [] current_key_name.append(snake_case__ ) if (isinstance(snake_case__ , nn.Linear ) or isinstance(snake_case__ , snake_case__ )) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` if not any(key in '''.'''.join(snake_case__ ) for key in modules_to_not_convert ): with init_empty_weights(): if isinstance(snake_case__ , snake_case__ ): lowercase_ , lowercase_ = module.weight.shape else: lowercase_ = module.in_features lowercase_ = module.out_features if quantization_config.quantization_method() == "llm_int8": lowercase_ = bnb.nn.LinearabitLt( snake_case__ , snake_case__ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , ) lowercase_ = True else: if ( quantization_config.llm_inta_skip_modules is not None and name in quantization_config.llm_inta_skip_modules ): pass else: lowercase_ = bnb.nn.Linearabit( snake_case__ , snake_case__ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , ) lowercase_ = True # Store the module class in case we need to transpose the weight later lowercase_ = type(snake_case__ ) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(snake_case__ ) if len(list(module.children() ) ) > 0: lowercase_ , lowercase_ = _replace_with_bnb_linear( snake_case__ , snake_case__ , snake_case__ , snake_case__ , has_been_replaced=snake_case__ , ) # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def a ( snake_case__: Any , snake_case__: Any=None , snake_case__: Union[str, Any]=None , snake_case__: str=None ): '''simple docstring''' lowercase_ = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert lowercase_ , lowercase_ = _replace_with_bnb_linear( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) if not has_been_replaced: logger.warning( '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.''' ''' Please double check your model architecture, or submit an issue on github if you think this is''' ''' a bug.''' ) return model def a ( *snake_case__: str , **snake_case__: Dict ): '''simple docstring''' warnings.warn( 
'''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' , snake_case__ , ) return replace_with_bnb_linear(*snake_case__ , **snake_case__ ) def a ( *snake_case__: Any , **snake_case__: List[Any] ): '''simple docstring''' warnings.warn( '''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' , snake_case__ , ) return set_module_quantized_tensor_to_device(*snake_case__ , **snake_case__ ) def a ( snake_case__: Optional[Any] ): '''simple docstring''' lowercase_ = deepcopy(snake_case__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_model.tie_weights() lowercase_ = find_tied_parameters(snake_case__ ) # For compatibility with Accelerate < 0.18 if isinstance(snake_case__ , snake_case__ ): lowercase_ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: lowercase_ = sum(snake_case__ , [] ) lowercase_ = len(snake_case__ ) > 0 # Check if it is a base model lowercase_ = not hasattr(snake_case__ , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head lowercase_ = list(model.named_children() ) lowercase_ = [list_modules[-1][0]] # add last module together with tied weights lowercase_ = set(snake_case__ ) - set(snake_case__ ) lowercase_ = list(set(snake_case__ ) ) + list(snake_case__ ) # remove ".weight" from the keys lowercase_ = ['''.weight''', '''.bias'''] lowercase_ = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: lowercase_ = name.replace(snake_case__ , '''''' ) filtered_module_names.append(snake_case__ ) return filtered_module_names
30
1
import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def a ( snake_case__: Optional[Any] ): '''simple docstring''' lowercase_ = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''_float_tensor''', '''decoder.output_projection.weight''', ] for k in ignore_keys: state_dict.pop(snake_case__ , snake_case__ ) def a ( snake_case__: Tuple ): '''simple docstring''' lowercase_ , lowercase_ = emb.weight.shape lowercase_ = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ ) lowercase_ = emb.weight.data return lin_layer def a ( snake_case__: str , snake_case__: Any="facebook/mbart-large-en-ro" , snake_case__: Optional[Any]=False , snake_case__: Optional[Any]=False ): '''simple docstring''' lowercase_ = torch.load(snake_case__ , map_location='''cpu''' )['''model'''] remove_ignore_keys_(snake_case__ ) lowercase_ = state_dict['''encoder.embed_tokens.weight'''].shape[0] lowercase_ = MBartConfig.from_pretrained(snake_case__ , vocab_size=snake_case__ ) if mbart_aa and finetuned: lowercase_ = '''relu''' lowercase_ = state_dict['''decoder.embed_tokens.weight'''] lowercase_ = MBartForConditionalGeneration(snake_case__ ) model.model.load_state_dict(snake_case__ ) if finetuned: lowercase_ = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( 'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.' ) parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument( '--hf_config', default='facebook/mbart-large-cc25', type=str, help='Which huggingface architecture to use: mbart-large', ) parser.add_argument('--mbart_50', action='store_true', help='whether the model is mMART-50 checkpoint') parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint') __a = parser.parse_args() __a = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
30
import argparse import os import re __a = 'src/transformers/models/auto' # re pattern that matches mapping introductions: # SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict __a = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict') # re pattern that matches identifiers in mappings __a = re.compile(r'\s*\(\s*"(\S[^"]+)"') def a ( snake_case__: str , snake_case__: bool = False ): '''simple docstring''' with open(snake_case__ , '''r''' , encoding='''utf-8''' ) as f: lowercase_ = f.read() lowercase_ = content.split('''\n''' ) lowercase_ = [] lowercase_ = 0 while line_idx < len(snake_case__ ): if _re_intro_mapping.search(lines[line_idx] ) is not None: lowercase_ = len(re.search(r'''^(\s*)\S''' , lines[line_idx] ).groups()[0] ) + 8 # Start of a new mapping! while not lines[line_idx].startswith(''' ''' * indent + '''(''' ): new_lines.append(lines[line_idx] ) line_idx += 1 lowercase_ = [] while lines[line_idx].strip() != "]": # Blocks either fit in one line or not if lines[line_idx].strip() == "(": lowercase_ = line_idx while not lines[line_idx].startswith(''' ''' * indent + ''')''' ): line_idx += 1 blocks.append('''\n'''.join(lines[start_idx : line_idx + 1] ) ) else: blocks.append(lines[line_idx] ) line_idx += 1 # Sort blocks by their identifiers lowercase_ = sorted(snake_case__ , key=lambda snake_case__ : _re_identifier.search(snake_case__ ).groups()[0] ) new_lines += blocks else: new_lines.append(lines[line_idx] ) line_idx += 1 if overwrite: with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f: f.write('''\n'''.join(snake_case__ ) ) elif "\n".join(snake_case__ ) != content: return True def a ( snake_case__: bool = False ): '''simple docstring''' lowercase_ = [os.path.join(snake_case__ , snake_case__ ) for f in os.listdir(snake_case__ ) if f.endswith('''.py''' )] lowercase_ = [sort_auto_mapping(snake_case__ , overwrite=snake_case__ ) for fname in fnames] if not overwrite and any(snake_case__ ): lowercase_ = [f for f, d in zip(snake_case__ , snake_case__ ) if d] raise ValueError( F'''The following files have auto mappings that need sorting: {', '.join(snake_case__ )}. Run `make style` to fix''' ''' this.''' ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.') __a = parser.parse_args() sort_all_auto_mappings(not args.check_only)
30
1
from __future__ import annotations

ELECTRON_CHARGE = 1.6021E-19  # units = C


def electric_conductivity(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
    '''Given two of conductivity, electron concentration and mobility, return the name and value of the missing quantity.'''
    if (conductivity, electron_conc, mobility).count(0) != 1:
        raise ValueError('''You cannot supply more or less than 2 values''')
    elif conductivity < 0:
        raise ValueError('''Conductivity cannot be negative''')
    elif electron_conc < 0:
        raise ValueError('''Electron concentration cannot be negative''')
    elif mobility < 0:
        raise ValueError('''Mobility cannot be negative''')
    elif conductivity == 0:
        return (
            "conductivity",
            mobility * electron_conc * ELECTRON_CHARGE,
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            conductivity / (mobility * ELECTRON_CHARGE),
        )
    else:
        return (
            "mobility",
            conductivity / (electron_conc * ELECTRON_CHARGE),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
30
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    '''
    Recursive 0/1 knapsack: maximum value obtainable from the items at positions
    index..number_of_items-1 without exceeding max_weight.

    >>> knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)
    13
    '''
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
30
1
import copy from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto.configuration_auto import AutoConfig if TYPE_CHECKING: from ... import PreTrainedTokenizerBase, TensorType __a = logging.get_logger(__name__) class lowercase__( UpperCAmelCase ): """simple docstring""" a :Optional[int] = 'vision-encoder-decoder' a :Any = True def __init__( self : Optional[int] , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> List[Any]: super().__init__(**SCREAMING_SNAKE_CASE_ ) if "encoder" not in kwargs or "decoder" not in kwargs: raise ValueError( f'''A configuraton of type {self.model_type} cannot be instantiated because ''' f'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' ) lowercase_ = kwargs.pop('''encoder''' ) lowercase_ = encoder_config.pop('''model_type''' ) lowercase_ = kwargs.pop('''decoder''' ) lowercase_ = decoder_config.pop('''model_type''' ) lowercase_ = AutoConfig.for_model(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) lowercase_ = AutoConfig.for_model(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) lowercase_ = True @classmethod def _lowercase ( cls : Tuple , SCREAMING_SNAKE_CASE_ : PretrainedConfig , SCREAMING_SNAKE_CASE_ : PretrainedConfig , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> PretrainedConfig: logger.info('''Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' ) lowercase_ = True lowercase_ = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Optional[Any] ) -> Any: lowercase_ = copy.deepcopy(self.__dict__ ) lowercase_ = self.encoder.to_dict() lowercase_ = self.decoder.to_dict() lowercase_ = self.__class__.model_type return output class lowercase__( UpperCAmelCase ): """simple docstring""" a :Any = version.parse('1.11' ) @property def _lowercase ( self : str ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def _lowercase ( self : Dict ) -> float: return 1e-4 @property def _lowercase ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]: return OrderedDict({'''last_hidden_state''': {0: '''batch''', 1: '''encoder_sequence'''}} ) class lowercase__( UpperCAmelCase ): """simple docstring""" @property def _lowercase ( self : Any ) -> Mapping[str, Mapping[int, str]]: lowercase_ = OrderedDict() lowercase_ = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} lowercase_ = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} lowercase_ = {0: '''batch''', 1: '''encoder_sequence'''} return common_inputs def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : "PreTrainedTokenizerBase" , SCREAMING_SNAKE_CASE_ : int = -1 , SCREAMING_SNAKE_CASE_ : int = -1 , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : Optional["TensorType"] = None , ) -> Mapping[str, Any]: import torch lowercase_ = OrderedDict() lowercase_ = super().generate_dummy_inputs( SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , seq_length=SCREAMING_SNAKE_CASE_ , is_pair=SCREAMING_SNAKE_CASE_ , framework=SCREAMING_SNAKE_CASE_ ) lowercase_ , lowercase_ = dummy_input['''input_ids'''].shape lowercase_ = (batch, encoder_sequence, self._config.encoder_hidden_size) lowercase_ = dummy_input.pop('''input_ids''' ) lowercase_ = 
dummy_input.pop('''attention_mask''' ) lowercase_ = torch.zeros(SCREAMING_SNAKE_CASE_ ) return common_inputs class lowercase__( UpperCAmelCase ): """simple docstring""" @property def _lowercase ( self : Dict ) -> None: pass def _lowercase ( self : Any , SCREAMING_SNAKE_CASE_ : PretrainedConfig ) -> OnnxConfig: return VisionEncoderDecoderEncoderOnnxConfig(SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : PretrainedConfig , SCREAMING_SNAKE_CASE_ : PretrainedConfig , SCREAMING_SNAKE_CASE_ : str = "default" ) -> OnnxConfig: lowercase_ = encoder_config.hidden_size return VisionEncoderDecoderDecoderOnnxConfig(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
30
import argparse from collections import defaultdict import yaml __a = 'docs/source/en/_toctree.yml' def a ( snake_case__: Dict ): '''simple docstring''' lowercase_ = defaultdict(snake_case__ ) for doc in model_doc: counts[doc["local"]] += 1 lowercase_ = [key for key, value in counts.items() if value > 1] lowercase_ = [] for duplicate_key in duplicates: lowercase_ = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} ) if len(snake_case__ ) > 1: raise ValueError( F'''{duplicate_key} is present several times in the documentation table of content at ''' '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the ''' '''others.''' ) # Only add this once new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] ) # Sort return sorted(snake_case__ , key=lambda snake_case__ : s["title"].lower() ) def a ( snake_case__: List[Any]=False ): '''simple docstring''' with open(snake_case__ , encoding='''utf-8''' ) as f: lowercase_ = yaml.safe_load(f.read() ) # Get to the API doc lowercase_ = 0 while content[api_idx]["title"] != "API": api_idx += 1 lowercase_ = content[api_idx]['''sections'''] # Then to the model doc lowercase_ = 0 while api_doc[model_idx]["title"] != "Models": model_idx += 1 lowercase_ = api_doc[model_idx]['''sections'''] lowercase_ = [(idx, section) for idx, section in enumerate(snake_case__ ) if '''sections''' in section] lowercase_ = False for idx, modality_doc in modalities_docs: lowercase_ = modality_doc['''sections'''] lowercase_ = clean_model_doc_toc(snake_case__ ) if old_modality_doc != new_modality_doc: lowercase_ = True if overwrite: lowercase_ = new_modality_doc if diff: if overwrite: lowercase_ = model_doc lowercase_ = api_doc with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f: f.write(yaml.dump(snake_case__ , allow_unicode=snake_case__ ) ) else: raise ValueError( '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') __a = parser.parse_args() check_model_doc(args.fix_and_overwrite)
30
1
from typing import List, Optional, Union import numpy as np from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ....feature_extraction_sequence_utils import SequenceFeatureExtractor from ....feature_extraction_utils import BatchFeature from ....file_utils import PaddingStrategy, TensorType from ....utils import logging __a = logging.get_logger(__name__) class lowercase__( UpperCAmelCase ): """simple docstring""" a :int = ['input_features', 'attention_mask'] def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : Any=8_0 , SCREAMING_SNAKE_CASE_ : Optional[int]=1_6_0_0_0 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.0 , SCREAMING_SNAKE_CASE_ : Optional[int]=1_0 , SCREAMING_SNAKE_CASE_ : Optional[Any]=2_5 , SCREAMING_SNAKE_CASE_ : str="hamming_window" , SCREAMING_SNAKE_CASE_ : Dict=3_27_68.0 , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.97 , SCREAMING_SNAKE_CASE_ : Any=1.0 , SCREAMING_SNAKE_CASE_ : List[Any]=True , SCREAMING_SNAKE_CASE_ : str=True , SCREAMING_SNAKE_CASE_ : Optional[Any]=False , **SCREAMING_SNAKE_CASE_ : List[str] , ) -> Optional[int]: super().__init__(feature_size=SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , padding_value=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) lowercase_ = feature_size lowercase_ = sampling_rate lowercase_ = padding_value lowercase_ = hop_length lowercase_ = win_length lowercase_ = frame_signal_scale lowercase_ = preemphasis_coeff lowercase_ = mel_floor lowercase_ = normalize_means lowercase_ = normalize_vars lowercase_ = win_function lowercase_ = return_attention_mask lowercase_ = win_length * sampling_rate // 1_0_0_0 lowercase_ = hop_length * sampling_rate // 1_0_0_0 lowercase_ = optimal_fft_length(self.sample_size ) lowercase_ = (self.n_fft // 2) + 1 def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : np.array ) -> np.ndarray: if self.win_function == "hamming_window": lowercase_ = window_function(window_length=self.sample_size , name=self.win_function , periodic=SCREAMING_SNAKE_CASE_ ) else: lowercase_ = window_function(window_length=self.sample_size , name=self.win_function ) lowercase_ = mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , ) lowercase_ = spectrogram( one_waveform * self.frame_signal_scale , window=SCREAMING_SNAKE_CASE_ , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=SCREAMING_SNAKE_CASE_ , preemphasis=self.preemphasis_coeff , mel_filters=SCREAMING_SNAKE_CASE_ , mel_floor=self.mel_floor , log_mel='''log''' , ) return msfc_features.T def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Any ) -> Optional[Any]: # make sure we normalize float32 arrays if self.normalize_means: lowercase_ = x[:input_length].mean(axis=0 ) lowercase_ = np.subtract(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if self.normalize_vars: lowercase_ = x[:input_length].std(axis=0 ) lowercase_ = np.divide(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if input_length < x.shape[0]: lowercase_ = padding_value # make sure array is in float32 lowercase_ = x.astype(np.floataa ) return x def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : List[np.ndarray] , SCREAMING_SNAKE_CASE_ : Optional[np.ndarray] = None ) -> List[np.ndarray]: lowercase_ = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return 
[self._normalize_one(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.padding_value ) for x, n in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )] def __call__( self : List[str] , SCREAMING_SNAKE_CASE_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , SCREAMING_SNAKE_CASE_ : Union[bool, str, PaddingStrategy] = False , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE_ : Optional[int] = None , **SCREAMING_SNAKE_CASE_ : Union[str, Any] , ) -> BatchFeature: if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with''' f''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( '''It is strongly recommended to pass the ``sampling_rate`` argument to this function. ''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) lowercase_ = isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' ) lowercase_ = is_batched_numpy or ( isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: lowercase_ = [np.asarray(SCREAMING_SNAKE_CASE_ , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ): lowercase_ = np.asarray(SCREAMING_SNAKE_CASE_ , dtype=np.floataa ) elif isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowercase_ = raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowercase_ = [raw_speech] # extract fbank features lowercase_ = [self._extract_mfsc_features(SCREAMING_SNAKE_CASE_ ) for one_waveform in raw_speech] # convert into correct format for padding lowercase_ = BatchFeature({'''input_features''': features} ) lowercase_ = self.pad( SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) # make sure list is in array format lowercase_ = padded_inputs.get('''input_features''' ) if isinstance(input_features[0] , SCREAMING_SNAKE_CASE_ ): lowercase_ = [np.asarray(SCREAMING_SNAKE_CASE_ , dtype=np.floataa ) for feature in input_features] lowercase_ = padded_inputs.get('''attention_mask''' ) if attention_mask is not None: lowercase_ = [np.asarray(SCREAMING_SNAKE_CASE_ , dtype=np.intaa ) for array in attention_mask] if self.normalize_means or self.normalize_vars: lowercase_ = ( np.array(SCREAMING_SNAKE_CASE_ , dtype=np.intaa ) if self._get_padding_strategies(SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ ) is not PaddingStrategy.DO_NOT_PAD and padding else None ) lowercase_ = self.normalize( padded_inputs['''input_features'''] , attention_mask=SCREAMING_SNAKE_CASE_ ) if return_tensors is not None: lowercase_ = padded_inputs.convert_to_tensors(SCREAMING_SNAKE_CASE_ ) return padded_inputs
30
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING __a = logging.get_logger(__name__) class lowercase__( UpperCAmelCase ): """simple docstring""" a :Union[str, Any] = 'upernet' def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : Dict=None , SCREAMING_SNAKE_CASE_ : str=5_1_2 , SCREAMING_SNAKE_CASE_ : Tuple=0.02 , SCREAMING_SNAKE_CASE_ : Optional[Any]=[1, 2, 3, 6] , SCREAMING_SNAKE_CASE_ : Optional[int]=True , SCREAMING_SNAKE_CASE_ : Tuple=0.4 , SCREAMING_SNAKE_CASE_ : Optional[int]=3_8_4 , SCREAMING_SNAKE_CASE_ : str=2_5_6 , SCREAMING_SNAKE_CASE_ : Dict=1 , SCREAMING_SNAKE_CASE_ : Tuple=False , SCREAMING_SNAKE_CASE_ : str=2_5_5 , **SCREAMING_SNAKE_CASE_ : str , ) -> int: super().__init__(**SCREAMING_SNAKE_CASE_ ) if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) lowercase_ = CONFIG_MAPPING['''resnet'''](out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] ) elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase_ = backbone_config.get('''model_type''' ) lowercase_ = CONFIG_MAPPING[backbone_model_type] lowercase_ = config_class.from_dict(SCREAMING_SNAKE_CASE_ ) lowercase_ = backbone_config lowercase_ = hidden_size lowercase_ = initializer_range lowercase_ = pool_scales lowercase_ = use_auxiliary_head lowercase_ = auxiliary_loss_weight lowercase_ = auxiliary_in_channels lowercase_ = auxiliary_channels lowercase_ = auxiliary_num_convs lowercase_ = auxiliary_concat_input lowercase_ = loss_ignore_index def _lowercase ( self : List[str] ) -> List[str]: lowercase_ = copy.deepcopy(self.__dict__ ) lowercase_ = self.backbone_config.to_dict() lowercase_ = self.__class__.model_type return output
30
1
import logging

from transformers.configuration_utils import PretrainedConfig


logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    '''A BertConfig replica with additional parameters describing how the model weights are pruned/masked.'''

    model_type = 'masked_bert'

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, pruning_method="topK", mask_init="constant", mask_scale=0.0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
30
import tempfile import unittest from make_student import create_student_by_copying_alternating_layers from transformers import AutoConfig from transformers.file_utils import cached_property from transformers.testing_utils import require_torch __a = 'sshleifer/bart-tiny-random' __a = 'patrickvonplaten/t5-tiny-random' @require_torch class lowercase__( unittest.TestCase ): """simple docstring""" @cached_property def _lowercase ( self : Any ) -> Tuple: return AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : int ) -> List[Any]: lowercase_ , *lowercase_ = create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.num_hidden_layers , 1 ) def _lowercase ( self : Union[str, Any] ) -> Optional[int]: lowercase_ , *lowercase_ = create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=1 , d=SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : List[str] ) -> List[Any]: lowercase_ , *lowercase_ = create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=1 , d=SCREAMING_SNAKE_CASE_ ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers ) def _lowercase ( self : List[Any] ) -> List[Any]: lowercase_ , *lowercase_ = create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , 1 ) def _lowercase ( self : Union[str, Any] ) -> Optional[int]: with self.assertRaises(SCREAMING_SNAKE_CASE_ ): create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=SCREAMING_SNAKE_CASE_ , d=SCREAMING_SNAKE_CASE_ )
30
1
def a ( snake_case__: int , snake_case__: int ):
    '''simple docstring'''
    return base * power(snake_case__ , (exponent - 1) ) if exponent else 1


if __name__ == "__main__":
    print('Raise base to the power of exponent using recursion...')
    __a = int(input('Enter the base: ').strip())
    __a = int(input('Enter the exponent: ').strip())
    __a = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        __a = 1 / result
    print(f"{base} to the power of {exponent} is {result}")
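A minimal sketch of the recursive power helper above, using the names power/base/exponent that its body refers to; it covers non-negative exponents only, since the original handles negatives via abs() and a reciprocal:

def power(base: int, exponent: int) -> int:
    # base**exponent by repeated multiplication; exponent == 0 returns 1
    return base * power(base, exponent - 1) if exponent else 1

assert power(2, 10) == 1024
assert power(5, 0) == 1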
30
def a ( snake_case__: int = 100 ):
    '''simple docstring'''
    lowercase_ = (n * (n + 1) // 2) ** 2
    lowercase_ = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares


if __name__ == "__main__":
    print(f"{solution() = }")
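A quick arithmetic check of the two closed forms used above, taking n = 10:

n = 10
square_of_sum = (n * (n + 1) // 2) ** 2          # 55**2 = 3025
sum_of_squares = n * (n + 1) * (2 * n + 1) // 6  # 385
assert square_of_sum - sum_of_squares == 2640    # known result for n = 10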
30
1
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class lowercase__: """simple docstring""" @staticmethod def _lowercase ( *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : List[str] ) -> List[str]: pass @is_pipeline_test @require_vision class lowercase__( unittest.TestCase ): """simple docstring""" @require_torch def _lowercase ( self : List[Any] ) -> List[str]: lowercase_ = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , ) lowercase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowercase_ = image_classifier(SCREAMING_SNAKE_CASE_ , candidate_labels=['''a''', '''b''', '''c'''] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(SCREAMING_SNAKE_CASE_ ) , [ [{'''score''': 0.3_33, '''label''': '''a'''}, {'''score''': 0.3_33, '''label''': '''b'''}, {'''score''': 0.3_33, '''label''': '''c'''}], [{'''score''': 0.3_33, '''label''': '''a'''}, {'''score''': 0.3_33, '''label''': '''c'''}, {'''score''': 0.3_33, '''label''': '''b'''}], ] , ) lowercase_ = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE_ ) , [ [ {'''score''': 0.3_33, '''label''': ANY(SCREAMING_SNAKE_CASE_ )}, {'''score''': 0.3_33, '''label''': ANY(SCREAMING_SNAKE_CASE_ )}, {'''score''': 0.3_33, '''label''': ANY(SCREAMING_SNAKE_CASE_ )}, ], [ {'''score''': 0.3_33, '''label''': ANY(SCREAMING_SNAKE_CASE_ )}, {'''score''': 0.3_33, '''label''': ANY(SCREAMING_SNAKE_CASE_ )}, {'''score''': 0.3_33, '''label''': ANY(SCREAMING_SNAKE_CASE_ )}, ], [ {'''score''': 0.3_33, '''label''': ANY(SCREAMING_SNAKE_CASE_ )}, {'''score''': 0.3_33, '''label''': ANY(SCREAMING_SNAKE_CASE_ )}, {'''score''': 0.3_33, '''label''': ANY(SCREAMING_SNAKE_CASE_ )}, ], [ {'''score''': 0.3_33, '''label''': ANY(SCREAMING_SNAKE_CASE_ )}, {'''score''': 0.3_33, '''label''': ANY(SCREAMING_SNAKE_CASE_ )}, {'''score''': 0.3_33, '''label''': ANY(SCREAMING_SNAKE_CASE_ )}, ], [ {'''score''': 0.3_33, '''label''': ANY(SCREAMING_SNAKE_CASE_ )}, {'''score''': 0.3_33, '''label''': ANY(SCREAMING_SNAKE_CASE_ )}, {'''score''': 0.3_33, '''label''': ANY(SCREAMING_SNAKE_CASE_ )}, ], ] , ) @require_tf def _lowercase ( self : str ) -> Union[str, Any]: lowercase_ = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' ) lowercase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowercase_ = image_classifier(SCREAMING_SNAKE_CASE_ , candidate_labels=['''a''', '''b''', '''c'''] ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE_ ) , [{'''score''': 0.3_33, '''label''': '''a'''}, {'''score''': 0.3_33, '''label''': '''b'''}, {'''score''': 0.3_33, '''label''': '''c'''}] , ) lowercase_ = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE_ ) , [ [ {'''score''': 0.3_33, '''label''': ANY(SCREAMING_SNAKE_CASE_ )}, {'''score''': 0.3_33, '''label''': ANY(SCREAMING_SNAKE_CASE_ )}, {'''score''': 0.3_33, '''label''': ANY(SCREAMING_SNAKE_CASE_ )}, ], [ 
{'''score''': 0.3_33, '''label''': ANY(SCREAMING_SNAKE_CASE_ )}, {'''score''': 0.3_33, '''label''': ANY(SCREAMING_SNAKE_CASE_ )}, {'''score''': 0.3_33, '''label''': ANY(SCREAMING_SNAKE_CASE_ )}, ], [ {'''score''': 0.3_33, '''label''': ANY(SCREAMING_SNAKE_CASE_ )}, {'''score''': 0.3_33, '''label''': ANY(SCREAMING_SNAKE_CASE_ )}, {'''score''': 0.3_33, '''label''': ANY(SCREAMING_SNAKE_CASE_ )}, ], [ {'''score''': 0.3_33, '''label''': ANY(SCREAMING_SNAKE_CASE_ )}, {'''score''': 0.3_33, '''label''': ANY(SCREAMING_SNAKE_CASE_ )}, {'''score''': 0.3_33, '''label''': ANY(SCREAMING_SNAKE_CASE_ )}, ], [ {'''score''': 0.3_33, '''label''': ANY(SCREAMING_SNAKE_CASE_ )}, {'''score''': 0.3_33, '''label''': ANY(SCREAMING_SNAKE_CASE_ )}, {'''score''': 0.3_33, '''label''': ANY(SCREAMING_SNAKE_CASE_ )}, ], ] , ) @slow @require_torch def _lowercase ( self : int ) -> Dict: lowercase_ = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , ) # This is an image of 2 cats with remotes and no planes lowercase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowercase_ = image_classifier(SCREAMING_SNAKE_CASE_ , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE_ ) , [ {'''score''': 0.5_11, '''label''': '''remote'''}, {'''score''': 0.4_85, '''label''': '''cat'''}, {'''score''': 0.0_04, '''label''': '''plane'''}, ] , ) lowercase_ = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE_ ) , [ [ {'''score''': 0.5_11, '''label''': '''remote'''}, {'''score''': 0.4_85, '''label''': '''cat'''}, {'''score''': 0.0_04, '''label''': '''plane'''}, ], ] * 5 , ) @slow @require_tf def _lowercase ( self : Dict ) -> Union[str, Any]: lowercase_ = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' ) # This is an image of 2 cats with remotes and no planes lowercase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowercase_ = image_classifier(SCREAMING_SNAKE_CASE_ , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE_ ) , [ {'''score''': 0.5_11, '''label''': '''remote'''}, {'''score''': 0.4_85, '''label''': '''cat'''}, {'''score''': 0.0_04, '''label''': '''plane'''}, ] , ) lowercase_ = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE_ ) , [ [ {'''score''': 0.5_11, '''label''': '''remote'''}, {'''score''': 0.4_85, '''label''': '''cat'''}, {'''score''': 0.0_04, '''label''': '''plane'''}, ], ] * 5 , )
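A condensed sketch of the pipeline call pattern these tests exercise, with the same model id, fixture image, and candidate labels as the slow test above:

from PIL import Image
from transformers import pipeline

classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
print(classifier(image, candidate_labels=["cat", "plane", "remote"]))
# roughly [{'score': 0.511, 'label': 'remote'}, {'score': 0.485, 'label': 'cat'}, {'score': 0.004, 'label': 'plane'}]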
30
import logging from transformers.configuration_utils import PretrainedConfig __a = logging.getLogger(__name__) class lowercase__( UpperCAmelCase ): """simple docstring""" a :Optional[int] = 'masked_bert' def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any]=3_0_5_2_2 , SCREAMING_SNAKE_CASE_ : List[str]=7_6_8 , SCREAMING_SNAKE_CASE_ : Optional[int]=1_2 , SCREAMING_SNAKE_CASE_ : Any=1_2 , SCREAMING_SNAKE_CASE_ : str=3_0_7_2 , SCREAMING_SNAKE_CASE_ : Union[str, Any]="gelu" , SCREAMING_SNAKE_CASE_ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE_ : List[str]=0.1 , SCREAMING_SNAKE_CASE_ : Tuple=5_1_2 , SCREAMING_SNAKE_CASE_ : str=2 , SCREAMING_SNAKE_CASE_ : Dict=0.02 , SCREAMING_SNAKE_CASE_ : Any=1e-12 , SCREAMING_SNAKE_CASE_ : Any=0 , SCREAMING_SNAKE_CASE_ : Optional[int]="topK" , SCREAMING_SNAKE_CASE_ : Dict="constant" , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.0 , **SCREAMING_SNAKE_CASE_ : Optional[int] , ) -> Optional[Any]: super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) lowercase_ = vocab_size lowercase_ = hidden_size lowercase_ = num_hidden_layers lowercase_ = num_attention_heads lowercase_ = hidden_act lowercase_ = intermediate_size lowercase_ = hidden_dropout_prob lowercase_ = attention_probs_dropout_prob lowercase_ = max_position_embeddings lowercase_ = type_vocab_size lowercase_ = initializer_range lowercase_ = layer_norm_eps lowercase_ = pruning_method lowercase_ = mask_init lowercase_ = mask_scale
30
1
from typing import TYPE_CHECKING

from ...utils import _LazyModule

__a = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']}

if TYPE_CHECKING:
    from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
    import sys

    __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
30
import os


def a ( ):
    '''simple docstring'''
    lowercase_ = os.path.join(os.path.dirname(snake_case__ ) , '''num.txt''' )
    with open(snake_case__ ) as file_hand:
        return str(sum(int(snake_case__ ) for line in file_hand ) )[:10]


if __name__ == "__main__":
    print(solution())
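The helper above sums the integers in num.txt and keeps the first ten digits of the total; the same idea with the data inlined instead of read from disk (the two sample numbers are illustrative only):

lines = [
    "37107287533902102798797998220837590246510135740250",
    "46376937677490009712648124896970078050417018260538",
]
total = sum(int(line) for line in lines)
print(str(total)[:10])  # first ten digits of the running sum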
30
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __a = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST', 'UniSpeechForCTC', 'UniSpeechForPreTraining', 'UniSpeechForSequenceClassification', 'UniSpeechModel', 'UniSpeechPreTrainedModel', ] if TYPE_CHECKING: from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_unispeech import ( UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST, UniSpeechForCTC, UniSpeechForPreTraining, UniSpeechForSequenceClassification, UniSpeechModel, UniSpeechPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
30
import os import zipfile import pytest from datasets.utils.extract import ( BzipaExtractor, Extractor, GzipExtractor, LzaExtractor, SevenZipExtractor, TarExtractor, XzExtractor, ZipExtractor, ZstdExtractor, ) from .utils import require_lza, require_pyazr, require_zstandard @pytest.mark.parametrize( '''compression_format, is_archive''' , [ ('''7z''', True), ('''bz2''', False), ('''gzip''', False), ('''lz4''', False), ('''tar''', True), ('''xz''', False), ('''zip''', True), ('''zstd''', False), ] , ) def a ( snake_case__: int , snake_case__: Tuple , snake_case__: Dict , snake_case__: Dict , snake_case__: List[Any] , snake_case__: int , snake_case__: List[Any] , snake_case__: Optional[int] , snake_case__: str , snake_case__: Union[str, Any] , snake_case__: List[str] , snake_case__: int , ): '''simple docstring''' lowercase_ = { '''7z''': (seven_zip_file, SevenZipExtractor), '''bz2''': (bza_file, BzipaExtractor), '''gzip''': (gz_file, GzipExtractor), '''lz4''': (lza_file, LzaExtractor), '''tar''': (tar_file, TarExtractor), '''xz''': (xz_file, XzExtractor), '''zip''': (zip_file, ZipExtractor), '''zstd''': (zstd_file, ZstdExtractor), } lowercase_ , lowercase_ = input_paths_and_base_extractors[compression_format] if input_path is None: lowercase_ = F'''for \'{compression_format}\' compression_format, ''' if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(snake_case__ ) assert base_extractor.is_extractable(snake_case__ ) lowercase_ = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''') base_extractor.extract(snake_case__ , snake_case__ ) if is_archive: assert output_path.is_dir() for file_path in output_path.iterdir(): assert file_path.name == text_file.name lowercase_ = file_path.read_text(encoding='''utf-8''' ) else: lowercase_ = output_path.read_text(encoding='''utf-8''' ) lowercase_ = text_file.read_text(encoding='''utf-8''' ) assert extracted_file_content == expected_file_content @pytest.mark.parametrize( '''compression_format, is_archive''' , [ ('''7z''', True), ('''bz2''', False), ('''gzip''', False), ('''lz4''', False), ('''tar''', True), ('''xz''', False), ('''zip''', True), ('''zstd''', False), ] , ) def a ( snake_case__: List[Any] , snake_case__: int , snake_case__: Optional[int] , snake_case__: Union[str, Any] , snake_case__: List[Any] , snake_case__: Tuple , snake_case__: Optional[int] , snake_case__: List[str] , snake_case__: Union[str, Any] , snake_case__: Tuple , snake_case__: int , snake_case__: Optional[int] , ): '''simple docstring''' lowercase_ = { '''7z''': seven_zip_file, '''bz2''': bza_file, '''gzip''': gz_file, '''lz4''': lza_file, '''tar''': tar_file, '''xz''': xz_file, '''zip''': zip_file, '''zstd''': zstd_file, } lowercase_ = input_paths[compression_format] if input_path is None: lowercase_ = F'''for \'{compression_format}\' compression_format, ''' if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(snake_case__ ) lowercase_ = Extractor.infer_extractor_format(snake_case__ ) assert extractor_format is not None lowercase_ = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''') Extractor.extract(snake_case__ , snake_case__ , snake_case__ ) if is_archive: assert output_path.is_dir() 
for file_path in output_path.iterdir(): assert file_path.name == text_file.name lowercase_ = file_path.read_text(encoding='''utf-8''' ) else: lowercase_ = output_path.read_text(encoding='''utf-8''' ) lowercase_ = text_file.read_text(encoding='''utf-8''' ) assert extracted_file_content == expected_file_content @pytest.fixture def a ( snake_case__: Union[str, Any] , snake_case__: List[Any] ): '''simple docstring''' import tarfile lowercase_ = tmp_path / '''data_dot_dot''' directory.mkdir() lowercase_ = directory / '''tar_file_with_dot_dot.tar''' with tarfile.TarFile(snake_case__ , '''w''' ) as f: f.add(snake_case__ , arcname=os.path.join('''..''' , text_file.name ) ) return path @pytest.fixture def a ( snake_case__: int ): '''simple docstring''' import tarfile lowercase_ = tmp_path / '''data_sym_link''' directory.mkdir() lowercase_ = directory / '''tar_file_with_sym_link.tar''' os.symlink('''..''' , directory / '''subdir''' , target_is_directory=snake_case__ ) with tarfile.TarFile(snake_case__ , '''w''' ) as f: f.add(str(directory / '''subdir''' ) , arcname='''subdir''' ) # str required by os.readlink on Windows and Python < 3.8 return path @pytest.mark.parametrize( '''insecure_tar_file, error_log''' , [('''tar_file_with_dot_dot''', '''illegal path'''), ('''tar_file_with_sym_link''', '''Symlink''')] , ) def a ( snake_case__: List[Any] , snake_case__: Optional[int] , snake_case__: List[str] , snake_case__: List[str] , snake_case__: int , snake_case__: Optional[Any] ): '''simple docstring''' lowercase_ = { '''tar_file_with_dot_dot''': tar_file_with_dot_dot, '''tar_file_with_sym_link''': tar_file_with_sym_link, } lowercase_ = insecure_tar_files[insecure_tar_file] lowercase_ = tmp_path / '''extracted''' TarExtractor.extract(snake_case__ , snake_case__ ) assert caplog.text for record in caplog.records: assert record.levelname == "ERROR" assert error_log in record.msg def a ( snake_case__: Optional[int] ): '''simple docstring''' # We should have less false positives than zipfile.is_zipfile # We do that by checking only the magic number lowercase_ = tmpdir / '''not_a_zip_file''' # From: https://github.com/python/cpython/pull/5053 lowercase_ = ( B'''\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00''' B'''\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I''' B'''DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07''' B'''\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82''' ) with not_a_zip_file.open('''wb''' ) as f: f.write(snake_case__ ) assert zipfile.is_zipfile(str(snake_case__ ) ) # is a false positive for `zipfile` assert not ZipExtractor.is_extractable(snake_case__ ) # but we're right
30
1
import csv from collections import defaultdict from dataclasses import dataclass, field from typing import List, Optional import matplotlib.pyplot as plt import numpy as np from matplotlib.ticker import ScalarFormatter from transformers import HfArgumentParser def a ( snake_case__: Optional[Any]=None , snake_case__: Optional[Any]=None ): '''simple docstring''' return field(default_factory=lambda: default , metadata=snake_case__ ) @dataclass class lowercase__: """simple docstring""" a :str = field( metadata={'help': 'The csv file to plot.'} , ) a :bool = field( default=UpperCAmelCase , metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'} , ) a :bool = field( default=UpperCAmelCase , metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'} , ) a :bool = field( default=UpperCAmelCase , metadata={'help': 'Disable logarithmic scale when plotting'} , ) a :bool = field( default=UpperCAmelCase , metadata={ 'help': 'Whether the csv file has training results or inference results. Defaults to inference results.' } , ) a :Optional[str] = field( default=UpperCAmelCase , metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'} , ) a :Optional[List[str]] = list_field( default=UpperCAmelCase , metadata={'help': 'List of model names that are used instead of the ones in the csv file.'} ) def a ( snake_case__: Dict ): '''simple docstring''' try: int(snake_case__ ) return True except ValueError: return False def a ( snake_case__: Optional[int] ): '''simple docstring''' try: float(snake_case__ ) return True except ValueError: return False class lowercase__: """simple docstring""" def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[str] ) -> Any: lowercase_ = args lowercase_ = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} ) with open(self.args.csv_file , newline='''''' ) as csv_file: lowercase_ = csv.DictReader(SCREAMING_SNAKE_CASE_ ) for row in reader: lowercase_ = row['''model'''] self.result_dict[model_name]["bsz"].append(int(row['''batch_size'''] ) ) self.result_dict[model_name]["seq_len"].append(int(row['''sequence_length'''] ) ) if can_convert_to_int(row['''result'''] ): # value is not None lowercase_ = int(row['''result'''] ) elif can_convert_to_float(row['''result'''] ): # value is not None lowercase_ = float(row['''result'''] ) def _lowercase ( self : Dict ) -> Optional[Any]: lowercase_ , lowercase_ = plt.subplots() lowercase_ = '''Time usage''' if self.args.is_time else '''Memory usage''' lowercase_ = title_str + ''' for training''' if self.args.is_train else title_str + ''' for inference''' if not self.args.no_log_scale: # set logarithm scales ax.set_xscale('''log''' ) ax.set_yscale('''log''' ) for axis in [ax.xaxis, ax.yaxis]: axis.set_major_formatter(ScalarFormatter() ) for model_name_idx, model_name in enumerate(self.result_dict.keys() ): lowercase_ = sorted(set(self.result_dict[model_name]['''bsz'''] ) ) lowercase_ = sorted(set(self.result_dict[model_name]['''seq_len'''] ) ) lowercase_ = self.result_dict[model_name]['''result'''] ((lowercase_) , (lowercase_)) = ( (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes) ) lowercase_ = ( model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx] ) for inner_loop_value in inner_loop_array: if self.args.plot_along_batch: lowercase_ = np.asarray( [results[(x, inner_loop_value)] for x in x_axis_array if (x, 
inner_loop_value) in results] , dtype=SCREAMING_SNAKE_CASE_ , ) else: lowercase_ = np.asarray( [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , ) ((lowercase_) , (lowercase_)) = ( ('''batch_size''', '''len''') if self.args.plot_along_batch else ('''in #tokens''', '''bsz''') ) lowercase_ = np.asarray(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )[: len(SCREAMING_SNAKE_CASE_ )] plt.scatter( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , label=f'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' ) plt.plot(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''--''' ) title_str += f''' {label_model_name} vs.''' lowercase_ = title_str[:-4] lowercase_ = '''Time in s''' if self.args.is_time else '''Memory in MB''' # plot plt.title(SCREAMING_SNAKE_CASE_ ) plt.xlabel(SCREAMING_SNAKE_CASE_ ) plt.ylabel(SCREAMING_SNAKE_CASE_ ) plt.legend() if self.args.figure_png_file is not None: plt.savefig(self.args.figure_png_file ) else: plt.show() def a ( ): '''simple docstring''' lowercase_ = HfArgumentParser(snake_case__ ) lowercase_ = parser.parse_args_into_dataclasses()[0] lowercase_ = Plot(args=snake_case__ ) plot.plot() if __name__ == "__main__": main()
30
from __future__ import annotations


def a ( snake_case__: list[int] , snake_case__: int , snake_case__: int , snake_case__: int ):
    '''simple docstring'''
    if (direction == 1 and array[indexa] > array[indexa]) or (
        direction == 0 and array[indexa] < array[indexa]
    ):
        lowercase_ , lowercase_ = array[indexa], array[indexa]


def a ( snake_case__: list[int] , snake_case__: int , snake_case__: int , snake_case__: int ):
    '''simple docstring'''
    if length > 1:
        lowercase_ = int(length / 2 )
        for i in range(snake_case__ , low + middle ):
            comp_and_swap(snake_case__ , snake_case__ , i + middle , snake_case__ )
        bitonic_merge(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
        bitonic_merge(snake_case__ , low + middle , snake_case__ , snake_case__ )


def a ( snake_case__: list[int] , snake_case__: int , snake_case__: int , snake_case__: int ):
    '''simple docstring'''
    if length > 1:
        lowercase_ = int(length / 2 )
        bitonic_sort(snake_case__ , snake_case__ , snake_case__ , 1 )
        bitonic_sort(snake_case__ , low + middle , snake_case__ , 0 )
        bitonic_merge(snake_case__ , snake_case__ , snake_case__ , snake_case__ )


if __name__ == "__main__":
    __a = input('Enter numbers separated by a comma:\n').strip()
    __a = [int(item.strip()) for item in user_input.split(',')]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print('\nSorted array in ascending order is: ', end='')
    print(*unsorted, sep=', ')

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print('Sorted array in descending order is: ', end='')
    print(*unsorted, sep=', ')
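A usage sketch for the bitonic sort above, using the names bitonic_sort/bitonic_merge that the bodies call; the textbook algorithm expects the input length to be a power of two, and direction 1 means ascending while 0 means descending:

data = [12, 42, -21, 17, 23, 18, 9, -5]   # length 8, a power of two
bitonic_sort(data, 0, len(data), 1)        # ascending
print(data)                                # [-21, -5, 9, 12, 17, 18, 23, 42]
bitonic_merge(data, 0, len(data), 0)       # a sorted run is bitonic, so merging flips it to descending
print(data)                                # [42, 23, 18, 17, 12, 9, -5, -21]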
30
1
import unittest from transformers import BigBirdConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax from transformers.models.big_bird.modeling_flax_big_bird import ( FlaxBigBirdForCausalLM, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForPreTraining, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, FlaxBigBirdModel, ) class lowercase__( unittest.TestCase ): """simple docstring""" def __init__( self : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int=2 , SCREAMING_SNAKE_CASE_ : Optional[int]=5_6 , SCREAMING_SNAKE_CASE_ : Optional[Any]=True , SCREAMING_SNAKE_CASE_ : str=True , SCREAMING_SNAKE_CASE_ : Dict=True , SCREAMING_SNAKE_CASE_ : List[Any]=True , SCREAMING_SNAKE_CASE_ : Optional[int]=9_9 , SCREAMING_SNAKE_CASE_ : Tuple=3_2 , SCREAMING_SNAKE_CASE_ : Optional[int]=2 , SCREAMING_SNAKE_CASE_ : Tuple=2 , SCREAMING_SNAKE_CASE_ : Dict=7 , SCREAMING_SNAKE_CASE_ : List[Any]="gelu_new" , SCREAMING_SNAKE_CASE_ : str=0.1 , SCREAMING_SNAKE_CASE_ : List[str]=0.1 , SCREAMING_SNAKE_CASE_ : str=5_1_2 , SCREAMING_SNAKE_CASE_ : List[Any]=1_6 , SCREAMING_SNAKE_CASE_ : int=2 , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.02 , SCREAMING_SNAKE_CASE_ : Optional[Any]=4 , SCREAMING_SNAKE_CASE_ : str="block_sparse" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=True , SCREAMING_SNAKE_CASE_ : Optional[int]=False , SCREAMING_SNAKE_CASE_ : Optional[int]=2 , SCREAMING_SNAKE_CASE_ : List[Any]=3 , ) -> int: lowercase_ = parent lowercase_ = batch_size lowercase_ = seq_length lowercase_ = is_training lowercase_ = use_attention_mask lowercase_ = use_token_type_ids lowercase_ = use_labels lowercase_ = vocab_size lowercase_ = hidden_size lowercase_ = num_hidden_layers lowercase_ = num_attention_heads lowercase_ = intermediate_size lowercase_ = hidden_act lowercase_ = hidden_dropout_prob lowercase_ = attention_probs_dropout_prob lowercase_ = max_position_embeddings lowercase_ = type_vocab_size lowercase_ = type_sequence_label_size lowercase_ = initializer_range lowercase_ = num_choices lowercase_ = rescale_embeddings lowercase_ = attention_type lowercase_ = use_bias lowercase_ = block_size lowercase_ = num_random_blocks def _lowercase ( self : Optional[int] ) -> int: lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase_ = None if self.use_attention_mask: lowercase_ = random_attention_mask([self.batch_size, self.seq_length] ) lowercase_ = None if self.use_token_type_ids: lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase_ = BigBirdConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , ) return config, input_ids, token_type_ids, attention_mask def _lowercase ( self : Union[str, Any] 
) -> Union[str, Any]: lowercase_ = self.prepare_config_and_inputs() lowercase_ , lowercase_ , lowercase_ , lowercase_ = config_and_inputs lowercase_ = { '''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask, } return config, inputs_dict @require_flax class lowercase__( UpperCAmelCase , unittest.TestCase ): """simple docstring""" a :Optional[Any] = ( ( FlaxBigBirdForCausalLM, FlaxBigBirdModel, FlaxBigBirdForPreTraining, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, ) if is_flax_available() else () ) a :str = False a :Tuple = False def _lowercase ( self : int ) -> Optional[int]: lowercase_ = FlaxBigBirdModelTester(self ) @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def _lowercase ( self : List[str] ) -> Union[str, Any]: super().test_from_pretrained_save_pretrained() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def _lowercase ( self : Dict ) -> Optional[int]: super().test_from_pretrained_with_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def _lowercase ( self : List[Any] ) -> Optional[int]: super().test_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def _lowercase ( self : Union[str, Any] ) -> Dict: super().test_hidden_states_output() @slow def _lowercase ( self : Optional[int] ) -> Tuple: for model_class_name in self.all_model_classes: lowercase_ = model_class_name.from_pretrained('''google/bigbird-roberta-base''' ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]: if self.test_attn_probs: super().test_attention_outputs() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def _lowercase ( self : Dict ) -> Any: lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowercase_ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowercase_ = model_class(SCREAMING_SNAKE_CASE_ ) @jax.jit def model_jitted(SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple=None , **SCREAMING_SNAKE_CASE_ : Tuple ): return model(input_ids=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) with self.subTest('''JIT Enabled''' ): lowercase_ = model_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): lowercase_ = model_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple() self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) ) for jitted_output, output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): self.assertEqual(jitted_output.shape , output.shape ) def _lowercase ( self : Any , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Any=1e-5 , SCREAMING_SNAKE_CASE_ : int="outputs" , SCREAMING_SNAKE_CASE_ : Optional[int]=None ) -> List[Any]: # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version, # an effort was done to return `attention_probs` (yet to be verified). 
if name.startswith('''outputs.attentions''' ): return else: super().check_pt_flax_outputs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
30
from __future__ import annotations from collections.abc import MutableSequence class lowercase__: """simple docstring""" def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : MutableSequence[float] ) -> None: if len(SCREAMING_SNAKE_CASE_ ) != degree + 1: raise ValueError( '''The number of coefficients should be equal to the degree + 1.''' ) lowercase_ = list(SCREAMING_SNAKE_CASE_ ) lowercase_ = degree def __add__( self : Any , SCREAMING_SNAKE_CASE_ : Polynomial ) -> Polynomial: if self.degree > polynomial_a.degree: lowercase_ = self.coefficients[:] for i in range(polynomial_a.degree + 1 ): coefficients[i] += polynomial_a.coefficients[i] return Polynomial(self.degree , SCREAMING_SNAKE_CASE_ ) else: lowercase_ = polynomial_a.coefficients[:] for i in range(self.degree + 1 ): coefficients[i] += self.coefficients[i] return Polynomial(polynomial_a.degree , SCREAMING_SNAKE_CASE_ ) def __sub__( self : str , SCREAMING_SNAKE_CASE_ : Polynomial ) -> Polynomial: return self + polynomial_a * Polynomial(0 , [-1] ) def __neg__( self : int ) -> Polynomial: return Polynomial(self.degree , [-c for c in self.coefficients] ) def __mul__( self : Any , SCREAMING_SNAKE_CASE_ : Polynomial ) -> Polynomial: lowercase_ = [0] * (self.degree + polynomial_a.degree + 1) for i in range(self.degree + 1 ): for j in range(polynomial_a.degree + 1 ): coefficients[i + j] += ( self.coefficients[i] * polynomial_a.coefficients[j] ) return Polynomial(self.degree + polynomial_a.degree , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : int | float ) -> int | float: lowercase_ = 0 for i in range(self.degree + 1 ): result += self.coefficients[i] * (substitution**i) return result def __str__( self : Tuple ) -> str: lowercase_ = '''''' for i in range(self.degree , -1 , -1 ): if self.coefficients[i] == 0: continue elif self.coefficients[i] > 0: if polynomial: polynomial += " + " else: polynomial += " - " if i == 0: polynomial += str(abs(self.coefficients[i] ) ) elif i == 1: polynomial += str(abs(self.coefficients[i] ) ) + "x" else: polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(SCREAMING_SNAKE_CASE_ ) return polynomial def __repr__( self : Optional[Any] ) -> str: return self.__str__() def _lowercase ( self : int ) -> Polynomial: lowercase_ = [0] * self.degree for i in range(self.degree ): lowercase_ = self.coefficients[i + 1] * (i + 1) return Polynomial(self.degree - 1 , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : int | float = 0 ) -> Polynomial: lowercase_ = [0] * (self.degree + 2) lowercase_ = constant for i in range(self.degree + 1 ): lowercase_ = self.coefficients[i] / (i + 1) return Polynomial(self.degree + 1 , SCREAMING_SNAKE_CASE_ ) def __eq__( self : str , SCREAMING_SNAKE_CASE_ : object ) -> bool: if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): return False if self.degree != polynomial_a.degree: return False for i in range(self.degree + 1 ): if self.coefficients[i] != polynomial_a.coefficients[i]: return False return True def __ne__( self : List[str] , SCREAMING_SNAKE_CASE_ : object ) -> bool: return not self.__eq__(SCREAMING_SNAKE_CASE_ )
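A small usage sketch for the polynomial class above, instantiated here under the conventional name Polynomial; coefficients are listed from the constant term upward, and only the dunder operators defined in the class are exercised:

p = Polynomial(2, [1, 2, 3])            # 3x^2 + 2x + 1
q = Polynomial(1, [0, 1])               # x
print(p + q)                            # 3x^2 + 3x + 1
print(p * q)                            # degree-3 product: 3x^3 + 2x^2 + x
print(p == Polynomial(2, [1, 2, 3]))    # True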
30
1
import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __a = logging.get_logger(__name__) __a = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'} __a = { 'vocab_file': { 'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json', 'allenai/longformer-large-4096': ( 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json' ), 'allenai/longformer-large-4096-finetuned-triviaqa': ( 'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json' ), 'allenai/longformer-base-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json' ), 'allenai/longformer-large-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json' ), }, 'merges_file': { 'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt', 'allenai/longformer-large-4096': ( 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt' ), 'allenai/longformer-large-4096-finetuned-triviaqa': ( 'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt' ), 'allenai/longformer-base-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt' ), 'allenai/longformer-large-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt' ), }, } __a = { 'allenai/longformer-base-4096': 4_0_9_6, 'allenai/longformer-large-4096': 4_0_9_6, 'allenai/longformer-large-4096-finetuned-triviaqa': 4_0_9_6, 'allenai/longformer-base-4096-extra.pos.embd.only': 4_0_9_6, 'allenai/longformer-large-4096-extra.pos.embd.only': 4_0_9_6, } @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def a ( ): '''simple docstring''' lowercase_ = ( list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) ) ) lowercase_ = bs[:] lowercase_ = 0 for b in range(2**8 ): if b not in bs: bs.append(snake_case__ ) cs.append(2**8 + n ) n += 1 lowercase_ = [chr(snake_case__ ) for n in cs] return dict(zip(snake_case__ , snake_case__ ) ) def a ( snake_case__: Union[str, Any] ): '''simple docstring''' lowercase_ = set() lowercase_ = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowercase_ = char return pairs class lowercase__( UpperCAmelCase ): """simple docstring""" a :List[Any] = VOCAB_FILES_NAMES a :List[str] = PRETRAINED_VOCAB_FILES_MAP a :List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a :Dict = ['input_ids', 'attention_mask'] def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any]="replace" , SCREAMING_SNAKE_CASE_ : Any="<s>" , SCREAMING_SNAKE_CASE_ : Optional[Any]="</s>" , SCREAMING_SNAKE_CASE_ : Any="</s>" , SCREAMING_SNAKE_CASE_ : str="<s>" , SCREAMING_SNAKE_CASE_ : Tuple="<unk>" , SCREAMING_SNAKE_CASE_ : Union[str, Any]="<pad>" , SCREAMING_SNAKE_CASE_ : Optional[Any]="<mask>" , SCREAMING_SNAKE_CASE_ : str=False , **SCREAMING_SNAKE_CASE_ : Optional[Any] , ) -> Any: lowercase_ = AddedToken(SCREAMING_SNAKE_CASE_ , 
lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else bos_token lowercase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else eos_token lowercase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else sep_token lowercase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else cls_token lowercase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else unk_token lowercase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else pad_token # Mask token behave like a normal word, i.e. include the space before it lowercase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token super().__init__( errors=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as vocab_handle: lowercase_ = json.load(SCREAMING_SNAKE_CASE_ ) lowercase_ = {v: k for k, v in self.encoder.items()} lowercase_ = errors # how to handle errors in decoding lowercase_ = bytes_to_unicode() lowercase_ = {v: k for k, v in self.byte_encoder.items()} with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as merges_handle: lowercase_ = merges_handle.read().split('''\n''' )[1:-1] lowercase_ = [tuple(merge.split() ) for merge in bpe_merges] lowercase_ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) ) lowercase_ = {} lowercase_ = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions lowercase_ = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' ) @property def _lowercase ( self : str ) -> List[Any]: return len(self.encoder ) def _lowercase ( self : Union[str, Any] ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def _lowercase ( self : Any , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> List[Any]: if token in self.cache: return self.cache[token] lowercase_ = tuple(SCREAMING_SNAKE_CASE_ ) lowercase_ = get_pairs(SCREAMING_SNAKE_CASE_ ) if not pairs: return token while True: lowercase_ = min(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE_ , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break lowercase_ , lowercase_ = bigram lowercase_ = [] lowercase_ = 0 while i < len(SCREAMING_SNAKE_CASE_ ): try: lowercase_ = word.index(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowercase_ = j if word[i] == first and i < len(SCREAMING_SNAKE_CASE_ ) - 1 and word[i + 1] == second: 
new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowercase_ = tuple(SCREAMING_SNAKE_CASE_ ) lowercase_ = new_word if len(SCREAMING_SNAKE_CASE_ ) == 1: break else: lowercase_ = get_pairs(SCREAMING_SNAKE_CASE_ ) lowercase_ = ''' '''.join(SCREAMING_SNAKE_CASE_ ) lowercase_ = word return word def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> List[str]: lowercase_ = [] for token in re.findall(self.pat , SCREAMING_SNAKE_CASE_ ): lowercase_ = ''''''.join( self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(SCREAMING_SNAKE_CASE_ ).split(''' ''' ) ) return bpe_tokens def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> List[str]: return self.encoder.get(SCREAMING_SNAKE_CASE_ , self.encoder.get(self.unk_token ) ) def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Union[str, Any]: return self.decoder.get(SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : Tuple ) -> Union[str, Any]: lowercase_ = ''''''.join(SCREAMING_SNAKE_CASE_ ) lowercase_ = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors ) return text def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(SCREAMING_SNAKE_CASE_ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return lowercase_ = os.path.join( SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) lowercase_ = os.path.join( SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ ) + '''\n''' ) lowercase_ = 0 with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE_ : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ''' Please check that the tokenizer is not corrupted!''' ) lowercase_ = token_index writer.write(''' '''.join(SCREAMING_SNAKE_CASE_ ) + '''\n''' ) index += 1 return vocab_file, merge_file def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowercase_ = [self.cls_token_id] lowercase_ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE_ : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ ) if token_ids_a is None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * 
len(SCREAMING_SNAKE_CASE_ )) + [1] def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]: lowercase_ = [self.sep_token_id] lowercase_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[int]=False , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> str: lowercase_ = kwargs.pop('''add_prefix_space''' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(SCREAMING_SNAKE_CASE_ ) > 0 and not text[0].isspace()): lowercase_ = ''' ''' + text return (text, kwargs)
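The module-level pair-extraction helper above (get_pairs in the un-obfuscated source) is the core step of the BPE merge loop; a self-contained restatement of what it computes:

def get_pairs(word):
    # Collect every adjacent symbol pair in a word given as a tuple of symbols.
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

assert get_pairs(tuple("hello")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}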
30
import itertools
import math


def a ( snake_case__: int ):
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(snake_case__ ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def a ( ):
    '''simple docstring'''
    lowercase_ = 2
    while True:
        if is_prime(snake_case__ ):
            yield num
        num += 1


def a ( snake_case__: int = 10_001 ):
    '''simple docstring'''
    return next(itertools.islice(prime_generator() , nth - 1 , snake_case__ ) )


if __name__ == "__main__":
    print(f"{solution() = }")
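A usage sketch, using the names is_prime/prime_generator/solution that the obfuscated bodies reference:

assert is_prime(2) and is_prime(3) and is_prime(97)
assert not is_prime(1) and not is_prime(9)

gen = prime_generator()
print([next(gen) for _ in range(6)])  # [2, 3, 5, 7, 11, 13]
print(solution(6))                    # 13, the 6th prime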
30
1
from ...configuration_utils import PretrainedConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { 'studio-ousia/luke-base': 'https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json', 'studio-ousia/luke-large': 'https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json', } class lowercase__( UpperCAmelCase ): """simple docstring""" a :Tuple = 'luke' def __init__( self : Any , SCREAMING_SNAKE_CASE_ : str=5_0_2_6_7 , SCREAMING_SNAKE_CASE_ : Dict=5_0_0_0_0_0 , SCREAMING_SNAKE_CASE_ : Optional[Any]=7_6_8 , SCREAMING_SNAKE_CASE_ : List[str]=2_5_6 , SCREAMING_SNAKE_CASE_ : List[str]=1_2 , SCREAMING_SNAKE_CASE_ : List[Any]=1_2 , SCREAMING_SNAKE_CASE_ : int=3_0_7_2 , SCREAMING_SNAKE_CASE_ : Union[str, Any]="gelu" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE_ : Dict=0.1 , SCREAMING_SNAKE_CASE_ : str=5_1_2 , SCREAMING_SNAKE_CASE_ : Tuple=2 , SCREAMING_SNAKE_CASE_ : Optional[int]=0.02 , SCREAMING_SNAKE_CASE_ : Dict=1e-12 , SCREAMING_SNAKE_CASE_ : int=True , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1 , SCREAMING_SNAKE_CASE_ : Dict=0 , SCREAMING_SNAKE_CASE_ : Optional[int]=2 , **SCREAMING_SNAKE_CASE_ : Optional[int] , ) -> Union[str, Any]: super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) lowercase_ = vocab_size lowercase_ = entity_vocab_size lowercase_ = hidden_size lowercase_ = entity_emb_size lowercase_ = num_hidden_layers lowercase_ = num_attention_heads lowercase_ = hidden_act lowercase_ = intermediate_size lowercase_ = hidden_dropout_prob lowercase_ = attention_probs_dropout_prob lowercase_ = max_position_embeddings lowercase_ = type_vocab_size lowercase_ = initializer_range lowercase_ = layer_norm_eps lowercase_ = use_entity_aware_attention lowercase_ = classifier_dropout
30
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)

try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
30
1
import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class lowercase__( unittest.TestCase ): """simple docstring""" @property def _lowercase ( self : Tuple ) -> str: torch.manual_seed(0 ) lowercase_ = UNetaDModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model @property def _lowercase ( self : int ) -> Any: torch.manual_seed(0 ) lowercase_ = VQModel( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , ) return model @property def _lowercase ( self : str ) -> List[str]: torch.manual_seed(0 ) lowercase_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) return CLIPTextModel(SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : str ) -> Tuple: lowercase_ = self.dummy_uncond_unet lowercase_ = DDIMScheduler() lowercase_ = self.dummy_vq_model lowercase_ = LDMPipeline(unet=SCREAMING_SNAKE_CASE_ , vqvae=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ ) ldm.to(SCREAMING_SNAKE_CASE_ ) ldm.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) lowercase_ = torch.manual_seed(0 ) lowercase_ = ldm(generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=2 , output_type='''numpy''' ).images lowercase_ = torch.manual_seed(0 ) lowercase_ = ldm(generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=2 , output_type='''numpy''' , return_dict=SCREAMING_SNAKE_CASE_ )[0] lowercase_ = image[0, -3:, -3:, -1] lowercase_ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) lowercase_ = np.array([0.85_12, 0.8_18, 0.64_11, 0.68_08, 0.44_65, 0.56_18, 0.46, 0.62_31, 0.51_72] ) lowercase_ = 1e-2 if torch_device != '''mps''' else 3e-2 assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance @slow @require_torch class lowercase__( unittest.TestCase ): """simple docstring""" def _lowercase ( self : List[Any] ) -> Any: lowercase_ = LDMPipeline.from_pretrained('''CompVis/ldm-celebahq-256''' ) ldm.to(SCREAMING_SNAKE_CASE_ ) ldm.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) lowercase_ = torch.manual_seed(0 ) lowercase_ = ldm(generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=5 , output_type='''numpy''' ).images lowercase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 2_5_6, 2_5_6, 3) lowercase_ = np.array([0.43_99, 0.4_49_75, 0.4_68_25, 0.4_74, 0.43_59, 0.45_81, 0.4_50_95, 0.43_41, 0.44_47] ) lowercase_ = 1e-2 if torch_device != '''mps''' else 3e-2 assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
30
from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS __a = logging.get_logger(__name__) __a = { 'linear': get_linear_schedule_with_warmup, 'cosine': get_cosine_schedule_with_warmup, 'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup, 'polynomial': get_polynomial_decay_schedule_with_warmup, 'constant': get_constant_schedule, 'constant_w_warmup': get_constant_schedule_with_warmup, } class lowercase__( UpperCAmelCase ): """simple docstring""" def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> List[str]: super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) if config is None: assert isinstance(self.model , SCREAMING_SNAKE_CASE_ ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" f''' {self.model.__class__}''' ) lowercase_ = self.model.config else: lowercase_ = config lowercase_ = data_args lowercase_ = self.config.tgt_vocab_size if isinstance(self.config , SCREAMING_SNAKE_CASE_ ) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( f'''The `config.pad_token_id` is `None`. 
Using `config.eos_token_id` = {self.config.eos_token_id} for''' ''' padding..''' ) if self.args.label_smoothing == 0: lowercase_ = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss lowercase_ = label_smoothed_nll_loss def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : int ) -> Optional[Any]: if self.optimizer is None: lowercase_ = ['''bias''', '''LayerNorm.weight'''] lowercase_ = [ { '''params''': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], '''weight_decay''': self.args.weight_decay, }, { '''params''': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0, }, ] lowercase_ = Adafactor if self.args.adafactor else AdamW if self.args.adafactor: lowercase_ = Adafactor lowercase_ = {'''scale_parameter''': False, '''relative_step''': False} else: lowercase_ = AdamW lowercase_ = { '''betas''': (self.args.adam_betaa, self.args.adam_betaa), '''eps''': self.args.adam_epsilon, } lowercase_ = self.args.learning_rate if self.sharded_ddp: lowercase_ = OSS( params=SCREAMING_SNAKE_CASE_ , optim=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) else: lowercase_ = optimizer_cls(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) if self.lr_scheduler is None: lowercase_ = self._get_lr_scheduler(SCREAMING_SNAKE_CASE_ ) else: # ignoring --lr_scheduler logger.warning('''scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.''' ) def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Dict ) -> Dict: lowercase_ = arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": lowercase_ = schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": lowercase_ = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps ) else: lowercase_ = schedule_func( self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=SCREAMING_SNAKE_CASE_ ) return scheduler def _lowercase ( self : Tuple ) -> Optional[torch.utils.data.Sampler]: if isinstance(self.train_dataset , torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[Any] ) -> Any: if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token lowercase_ = model(**SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )[0] lowercase_ = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) ) else: # compute usual loss via models lowercase_ , lowercase_ = model(**SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )[:2] else: # compute label smoothed loss lowercase_ = model(**SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )[0] lowercase_ = torch.nn.functional.log_softmax(SCREAMING_SNAKE_CASE_ , dim=-1 ) lowercase_ , lowercase_ = self.loss_fn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.args.label_smoothing , 
ignore_index=self.config.pad_token_id ) return loss, logits def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> List[Any]: lowercase_ = inputs.pop('''labels''' ) lowercase_ , lowercase_ = self._compute_loss(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return loss def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : nn.Module , SCREAMING_SNAKE_CASE_ : Dict[str, Union[torch.Tensor, Any]] , SCREAMING_SNAKE_CASE_ : bool , SCREAMING_SNAKE_CASE_ : Optional[List[str]] = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: lowercase_ = self._prepare_inputs(SCREAMING_SNAKE_CASE_ ) lowercase_ = { '''max_length''': self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, '''num_beams''': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: lowercase_ = self.model.generate( inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , **SCREAMING_SNAKE_CASE_ , ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: lowercase_ = self._pad_tensors_to_max_len(SCREAMING_SNAKE_CASE_ , gen_kwargs['''max_length'''] ) lowercase_ = inputs.pop('''labels''' ) with torch.no_grad(): # compute loss on predict data lowercase_ , lowercase_ = self._compute_loss(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowercase_ = loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) lowercase_ = generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: lowercase_ = self._pad_tensors_to_max_len(SCREAMING_SNAKE_CASE_ , gen_kwargs['''max_length'''] ) return (loss, logits, labels) def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int ) -> Tuple: # If PAD token is not defined at least EOS token has to be defined lowercase_ = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( '''Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be''' f''' padded to `max_length`={max_length}''' ) lowercase_ = pad_token_id * torch.ones( (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device ) lowercase_ = tensor return padded_tensor
30
1
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1000,
    "megajoule": 1000000,
    "gigajoule": 1000000000,
    "wattsecond": 1.0,
    "watthour": 3600,
    "kilowatthour": 3600000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4186.8,
    "kilocalorie_nutr": 4186800.00,
    "electronvolt": 1.602176634e-19,
    "britishthermalunit_it": 1055.05585,
    "footpound": 1.355818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert an energy ``value`` from ``from_type`` units to ``to_type`` units."""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
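# A quick usage sketch for the converter above (assuming the fixed-up signature
# `energy_conversion(from_type, to_type, value)` given there; the expected numbers
# follow directly from the ENERGY_CONVERSION table):
assert energy_conversion("joule", "kilojoule", 1000) == 1.0
assert energy_conversion("kilowatthour", "joule", 1) == 3_600_000.0
assert energy_conversion("calorie_nutr", "joule", 1) == 4186.8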
30
import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class lowercase__( unittest.TestCase ): """simple docstring""" def _lowercase ( self : List[str] ) -> List[Any]: lowercase_ = 1_0 def _lowercase ( self : int ) -> List[str]: lowercase_ = [1, 2, 3, 4] lowercase_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(SCREAMING_SNAKE_CASE_ , self.block_size , 0 ) , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : int ) -> Optional[Any]: lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] self.assertEqual(truncate_or_pad(SCREAMING_SNAKE_CASE_ , self.block_size , 0 ) , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Union[str, Any] ) -> Optional[int]: lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0, 1_1, 1_2, 1_3] lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] self.assertEqual(truncate_or_pad(SCREAMING_SNAKE_CASE_ , self.block_size , 0 ) , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Any ) -> List[Any]: lowercase_ = '''It was the year of Our Lord one thousand seven hundred and seventy-five.\n\nSpiritual revelations were conceded to England at that favoured period, as at this.''' lowercase_ , lowercase_ = process_story(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , [] ) def _lowercase ( self : List[str] ) -> List[str]: lowercase_ = '''''' lowercase_ , lowercase_ = process_story(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , [] ) self.assertEqual(SCREAMING_SNAKE_CASE_ , [] ) def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]: lowercase_ = ( '''It was the year of Our Lord one thousand seven hundred and ''' '''seventy-five\n\nSpiritual revelations were conceded to England ''' '''at that favoured period, as at this.\n@highlight\n\nIt was the best of times''' ) lowercase_ , lowercase_ = process_story(SCREAMING_SNAKE_CASE_ ) lowercase_ = [ '''It was the year of Our Lord one thousand seven hundred and seventy-five.''', '''Spiritual revelations were conceded to England at that favoured period, as at this.''', ] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowercase_ = ['''It was the best of times.'''] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Union[str, Any] ) -> Optional[Any]: lowercase_ = torch.tensor([1, 2, 3, 4] ) lowercase_ = torch.tensor([1, 1, 1, 1] ) np.testing.assert_array_equal(build_mask(SCREAMING_SNAKE_CASE_ , 0 ).numpy() , expected.numpy() ) def _lowercase ( self : List[Any] ) -> Tuple: lowercase_ = torch.tensor([1, 2, 3, 4, 2_3, 2_3, 2_3] ) lowercase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(SCREAMING_SNAKE_CASE_ , 2_3 ).numpy() , expected.numpy() ) def _lowercase ( self : int ) -> Dict: lowercase_ = torch.tensor([8, 2, 3, 4, 1, 1, 1] ) lowercase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(SCREAMING_SNAKE_CASE_ , 1 ).numpy() , expected.numpy() ) def _lowercase ( self : List[str] ) -> Tuple: lowercase_ = 1_0_1 lowercase_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_0_1, 5, 6], [1, 1_0_1, 3, 4, 1_0_1, 6]] ) lowercase_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] ) lowercase_ = compute_token_type_ids(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) np.testing.assert_array_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
30
1
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { 'andreasmadsen/efficient_mlm_m0.40': ( 'https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json' ), } class lowercase__( UpperCAmelCase ): """simple docstring""" a :Optional[int] = 'roberta-prelayernorm' def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int]=5_0_2_6_5 , SCREAMING_SNAKE_CASE_ : Optional[int]=7_6_8 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1_2 , SCREAMING_SNAKE_CASE_ : Dict=1_2 , SCREAMING_SNAKE_CASE_ : str=3_0_7_2 , SCREAMING_SNAKE_CASE_ : int="gelu" , SCREAMING_SNAKE_CASE_ : List[str]=0.1 , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=5_1_2 , SCREAMING_SNAKE_CASE_ : Tuple=2 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.02 , SCREAMING_SNAKE_CASE_ : int=1e-12 , SCREAMING_SNAKE_CASE_ : List[str]=1 , SCREAMING_SNAKE_CASE_ : Any=0 , SCREAMING_SNAKE_CASE_ : Tuple=2 , SCREAMING_SNAKE_CASE_ : Any="absolute" , SCREAMING_SNAKE_CASE_ : Dict=True , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , **SCREAMING_SNAKE_CASE_ : int , ) -> Tuple: super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) lowercase_ = vocab_size lowercase_ = hidden_size lowercase_ = num_hidden_layers lowercase_ = num_attention_heads lowercase_ = hidden_act lowercase_ = intermediate_size lowercase_ = hidden_dropout_prob lowercase_ = attention_probs_dropout_prob lowercase_ = max_position_embeddings lowercase_ = type_vocab_size lowercase_ = initializer_range lowercase_ = layer_norm_eps lowercase_ = position_embedding_type lowercase_ = use_cache lowercase_ = classifier_dropout class lowercase__( UpperCAmelCase ): """simple docstring""" @property def _lowercase ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": lowercase_ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: lowercase_ = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
30
def heaps(arr: list) -> list:
    """Return all permutations of ``arr`` as tuples, using Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        # Base case: only one element left to place, record the current ordering.
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even: swap the i-th and last elements
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd: swap the first and last elements
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
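# A minimal sanity check for Heap's algorithm above (assumes the `heaps` helper defined
# there): every permutation of the input appears exactly once.
import itertools

permutations = heaps([1, 2, 3])
assert len(permutations) == 6
assert set(permutations) == set(itertools.permutations([1, 2, 3]))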
30
1
import argparse from torch import nn # transformers_old should correspond to branch `save_old_prophetnet_model_structure` here # original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively from transformers_old.modeling_prophetnet import ( ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld, ) from transformers_old.modeling_xlm_prophetnet import ( XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld, ) from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging __a = logging.get_logger(__name__) logging.set_verbosity_info() def a ( snake_case__: str , snake_case__: str ): '''simple docstring''' if "xprophetnet" in prophetnet_checkpoint_path: lowercase_ = XLMProphetNetForConditionalGenerationOld.from_pretrained(snake_case__ ) lowercase_ , lowercase_ = XLMProphetNetForConditionalGeneration.from_pretrained( snake_case__ , output_loading_info=snake_case__ ) else: lowercase_ = ProphetNetForConditionalGenerationOld.from_pretrained(snake_case__ ) lowercase_ , lowercase_ = ProphetNetForConditionalGeneration.from_pretrained( snake_case__ , output_loading_info=snake_case__ ) lowercase_ = ['''key_proj''', '''value_proj''', '''query_proj'''] lowercase_ = { '''self_attn''': '''ngram_self_attn''', '''cross_attn''': '''encoder_attn''', '''cross_attn_layer_norm''': '''encoder_attn_layer_norm''', '''feed_forward_layer_norm''': '''final_layer_norm''', '''feed_forward''': '''''', '''intermediate''': '''fc1''', '''output''': '''fc2''', '''key_proj''': '''k_proj''', '''query_proj''': '''q_proj''', '''value_proj''': '''v_proj''', '''word_embeddings''': '''embed_tokens''', '''embeddings_layer_norm''': '''emb_layer_norm''', '''relative_pos_embeddings''': '''relative_linear''', '''ngram_embeddings''': '''ngram_input_embed''', '''position_embeddings''': '''embed_positions''', } for key in loading_info["missing_keys"]: lowercase_ = key.split('''.''' ) if attributes[0] == "lm_head": lowercase_ = prophet lowercase_ = prophet_old else: lowercase_ = prophet.prophetnet lowercase_ = prophet_old.model lowercase_ = False for attribute in attributes: if attribute in mapping: lowercase_ = mapping[attribute] if not hasattr(snake_case__ , snake_case__ ) and len(snake_case__ ) > 0: lowercase_ = attribute elif hasattr(snake_case__ , snake_case__ ): lowercase_ = attribute if attribute == "weight": assert old_model.weight.shape == model.weight.shape, "Shapes have to match!" lowercase_ = old_model.weight logger.info(F'''{attribute} is initialized.''' ) lowercase_ = True break elif attribute == "bias": assert old_model.bias.shape == model.bias.shape, "Shapes have to match!" 
lowercase_ = old_model.bias logger.info(F'''{attribute} is initialized''' ) lowercase_ = True break elif attribute in special_keys and hasattr(snake_case__ , '''in_proj_weight''' ): lowercase_ = old_model.in_proj_weight.shape[0] // 3 lowercase_ = getattr(snake_case__ , snake_case__ ) param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match" param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match" if attribute == "query_proj": lowercase_ = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] ) lowercase_ = nn.Parameter(old_model.in_proj_bias[:embed_dim] ) elif attribute == "key_proj": lowercase_ = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] ) lowercase_ = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] ) elif attribute == "value_proj": lowercase_ = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] ) lowercase_ = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] ) lowercase_ = True break elif attribute == "position_embeddings": assert ( model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1] ), "Hidden size has to match" assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings." lowercase_ = nn.Parameter(old_model.embed_positions.weight[:512, :] ) lowercase_ = True break if attribute.isdigit(): lowercase_ = model[int(snake_case__ )] lowercase_ = old_model[int(snake_case__ )] else: lowercase_ = getattr(snake_case__ , snake_case__ ) if old_attribute == "": lowercase_ = old_model else: if not hasattr(snake_case__ , snake_case__ ): raise ValueError(F'''{old_model} does not have {old_attribute}''' ) lowercase_ = getattr(snake_case__ , snake_case__ ) if not is_key_init: raise ValueError(F'''{key} was not correctly initialized!''' ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) prophet.save_pretrained(snake_case__ ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) __a = parser.parse_args() convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
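# Example invocation of this conversion script (the script filename and both paths are
# placeholders; any ProphetNet/XLM-ProphetNet checkpoint saved with the old layout works):
#
#   python convert_prophetnet_checkpoint.py \
#       --prophetnet_checkpoint_path /path/to/old_prophetnet_checkpoint \
#       --pytorch_dump_folder_path /path/to/converted_model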
30
import argparse import math import os import torch from neural_compressor.utils.pytorch import load from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel def a ( ): '''simple docstring''' lowercase_ = argparse.ArgumentParser() parser.add_argument( '''-m''' , '''--pretrained_model_name_or_path''' , type=snake_case__ , default=snake_case__ , required=snake_case__ , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , ) parser.add_argument( '''-c''' , '''--caption''' , type=snake_case__ , default='''robotic cat with wings''' , help='''Text used to generate images.''' , ) parser.add_argument( '''-n''' , '''--images_num''' , type=snake_case__ , default=4 , help='''How much images to generate.''' , ) parser.add_argument( '''-s''' , '''--seed''' , type=snake_case__ , default=42 , help='''Seed for random process.''' , ) parser.add_argument( '''-ci''' , '''--cuda_id''' , type=snake_case__ , default=0 , help='''cuda_id.''' , ) lowercase_ = parser.parse_args() return args def a ( snake_case__: Optional[Any] , snake_case__: Tuple , snake_case__: Union[str, Any] ): '''simple docstring''' if not len(snake_case__ ) == rows * cols: raise ValueError('''The specified number of rows and columns are not correct.''' ) lowercase_ , lowercase_ = imgs[0].size lowercase_ = Image.new('''RGB''' , size=(cols * w, rows * h) ) lowercase_ , lowercase_ = grid.size for i, img in enumerate(snake_case__ ): grid.paste(snake_case__ , box=(i % cols * w, i // cols * h) ) return grid def a ( snake_case__: Tuple , snake_case__: Union[str, Any]="robotic cat with wings" , snake_case__: Union[str, Any]=7.5 , snake_case__: List[str]=50 , snake_case__: List[Any]=1 , snake_case__: Optional[int]=42 , ): '''simple docstring''' lowercase_ = torch.Generator(pipeline.device ).manual_seed(snake_case__ ) lowercase_ = pipeline( snake_case__ , guidance_scale=snake_case__ , num_inference_steps=snake_case__ , generator=snake_case__ , num_images_per_prompt=snake_case__ , ).images lowercase_ = int(math.sqrt(snake_case__ ) ) lowercase_ = image_grid(snake_case__ , rows=_rows , cols=num_images_per_prompt // _rows ) return grid, images __a = parse_args() # Load models and create wrapper for stable diffusion __a = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer') __a = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder') __a = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae') __a = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet') __a = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer ) __a = lambda images, clip_input: (images, False) if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')): __a = load(args.pretrained_model_name_or_path, model=unet) unet.eval() setattr(pipeline, 'unet', unet) else: __a = unet.to(torch.device('cuda', args.cuda_id)) __a = pipeline.to(unet.device) __a , __a = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed) grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split())))) __a = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split())) os.makedirs(dirname, exist_ok=True) for idx, image in 
enumerate(images): image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
30
1
import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class lowercase__( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ): """simple docstring""" a :str = IFInpaintingSuperResolutionPipeline a :Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'} a :int = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'} ) a :Optional[Any] = PipelineTesterMixin.required_optional_params - {'latents'} def _lowercase ( self : List[Any] ) -> List[str]: return self._get_superresolution_dummy_components() def _lowercase ( self : Any , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[Any]=0 ) -> int: if str(SCREAMING_SNAKE_CASE_ ).startswith('''mps''' ): lowercase_ = torch.manual_seed(SCREAMING_SNAKE_CASE_ ) else: lowercase_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ ) lowercase_ = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ ) lowercase_ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ ) lowercase_ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ ) lowercase_ = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''original_image''': original_image, '''mask_image''': mask_image, '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def _lowercase ( self : List[Any] ) -> Union[str, Any]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) def _lowercase ( self : Optional[Any] ) -> Optional[Any]: self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' ) def _lowercase ( self : str ) -> Union[str, Any]: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1e-1 ) def _lowercase ( self : Tuple ) -> List[str]: self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def _lowercase ( self : str ) -> Tuple: self._test_save_load_local() def _lowercase ( self : Union[str, Any] ) -> List[str]: self._test_inference_batch_single_identical( expected_max_diff=1e-2 , )
30
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __a = { 'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig'] } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['RemBertTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['RemBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'RemBertForCausalLM', 'RemBertForMaskedLM', 'RemBertForMultipleChoice', 'RemBertForQuestionAnswering', 'RemBertForSequenceClassification', 'RemBertForTokenClassification', 'RemBertLayer', 'RemBertModel', 'RemBertPreTrainedModel', 'load_tf_weights_in_rembert', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFRemBertForCausalLM', 'TFRemBertForMaskedLM', 'TFRemBertForMultipleChoice', 'TFRemBertForQuestionAnswering', 'TFRemBertForSequenceClassification', 'TFRemBertForTokenClassification', 'TFRemBertLayer', 'TFRemBertModel', 'TFRemBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_rembert import RemBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_rembert_fast import RemBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rembert import ( REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, RemBertForCausalLM, RemBertForMaskedLM, RemBertForMultipleChoice, RemBertForQuestionAnswering, RemBertForSequenceClassification, RemBertForTokenClassification, RemBertLayer, RemBertModel, RemBertPreTrainedModel, load_tf_weights_in_rembert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rembert import ( TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFRemBertForCausalLM, TFRemBertForMaskedLM, TFRemBertForMultipleChoice, TFRemBertForQuestionAnswering, TFRemBertForSequenceClassification, TFRemBertForTokenClassification, TFRemBertLayer, TFRemBertModel, TFRemBertPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
30
1
from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_torch_available(): import torch if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm __a = logging.get_logger(__name__) @dataclass class lowercase__( UpperCAmelCase ): """simple docstring""" a :Optional[Any] = [ 'no_inference', 'no_cuda', 'no_tpu', 'no_speed', 'no_memory', 'no_env_print', 'no_multi_process', ] def __init__( self : int , **SCREAMING_SNAKE_CASE_ : Tuple ) -> Optional[int]: for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: lowercase_ = deprecated_arg[3:] setattr(self , SCREAMING_SNAKE_CASE_ , not kwargs.pop(SCREAMING_SNAKE_CASE_ ) ) logger.warning( f'''{deprecated_arg} is depreciated. Please use --no_{positive_arg} or''' f''' {positive_arg}={kwargs[positive_arg]}''' ) lowercase_ = kwargs.pop('''torchscript''' , self.torchscript ) lowercase_ = kwargs.pop('''torch_xla_tpu_print_metrics''' , self.torch_xla_tpu_print_metrics ) lowercase_ = kwargs.pop('''fp16_opt_level''' , self.fpaa_opt_level ) super().__init__(**SCREAMING_SNAKE_CASE_ ) a :bool = field(default=UpperCAmelCase , metadata={'help': 'Trace the models using torchscript'} ) a :bool = field(default=UpperCAmelCase , metadata={'help': 'Print Xla/PyTorch tpu metrics'} ) a :str = field( default='O1' , metadata={ 'help': ( 'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. ' 'See details at https://nvidia.github.io/apex/amp.html' ) } , ) @cached_property def _lowercase ( self : Any ) -> Tuple["torch.device", int]: requires_backends(self , ['''torch'''] ) logger.info('''PyTorch: setting up devices''' ) if not self.cuda: lowercase_ = torch.device('''cpu''' ) lowercase_ = 0 elif is_torch_tpu_available(): lowercase_ = xm.xla_device() lowercase_ = 0 else: lowercase_ = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) lowercase_ = torch.cuda.device_count() return device, n_gpu @property def _lowercase ( self : List[Any] ) -> Union[str, Any]: return is_torch_tpu_available() and self.tpu @property def _lowercase ( self : List[Any] ) -> int: requires_backends(self , ['''torch'''] ) # TODO(PVP): currently only single GPU is supported return torch.cuda.current_device() @property def _lowercase ( self : List[Any] ) -> "torch.device": requires_backends(self , ['''torch'''] ) return self._setup_devices[0] @property def _lowercase ( self : Any ) -> int: requires_backends(self , ['''torch'''] ) return self._setup_devices[1] @property def _lowercase ( self : Optional[Any] ) -> Dict: return self.n_gpu > 0
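# Usage sketch for the benchmark arguments defined above, via the public `transformers`
# benchmark API (the class names below are the upstream ones; the model id, batch size
# and sequence length are arbitrary example values):
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

benchmark_args = PyTorchBenchmarkArguments(
    models=["bert-base-uncased"],
    batch_sizes=[8],
    sequence_lengths=[128],
)
benchmark = PyTorchBenchmark(benchmark_args)
results = benchmark.run()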
30
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __a = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['DeiTFeatureExtractor'] __a = ['DeiTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'DeiTForImageClassification', 'DeiTForImageClassificationWithTeacher', 'DeiTForMaskedImageModeling', 'DeiTModel', 'DeiTPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFDeiTForImageClassification', 'TFDeiTForImageClassificationWithTeacher', 'TFDeiTForMaskedImageModeling', 'TFDeiTModel', 'TFDeiTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_deit import DeiTFeatureExtractor from .image_processing_deit import DeiTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deit import ( DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, DeiTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deit import ( TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, TFDeiTPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
30
1
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , ) @pytest.mark.usefixtures('sm_env' ) @parameterized_class( [ { 'framework': 'pytorch', 'script': 'run_glue.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 650, 'eval_accuracy': 0.7, 'eval_loss': 0.6}, }, { 'framework': 'pytorch', 'script': 'run_ddp.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 600, 'eval_accuracy': 0.7, 'eval_loss': 0.6}, }, { 'framework': 'tensorflow', 'script': 'run_tf_dist.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 600, 'eval_accuracy': 0.6, 'eval_loss': 0.7}, }, ] ) class lowercase__( unittest.TestCase ): """simple docstring""" def _lowercase ( self : Optional[int] ) -> Optional[int]: if self.framework == "pytorch": subprocess.run( f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding='''utf-8''' , check=SCREAMING_SNAKE_CASE_ , ) assert hasattr(self , '''env''' ) def _lowercase ( self : Any , SCREAMING_SNAKE_CASE_ : List[str] ) -> Optional[int]: lowercase_ = f'''{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}''' # distributed data settings lowercase_ = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=SCREAMING_SNAKE_CASE_ , instance_count=SCREAMING_SNAKE_CASE_ , instance_type=self.instance_type , debugger_hook_config=SCREAMING_SNAKE_CASE_ , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=SCREAMING_SNAKE_CASE_ , py_version='''py36''' , ) def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str ) -> List[Any]: TrainingJobAnalytics(SCREAMING_SNAKE_CASE_ ).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''' ) @parameterized.expand([(2,)] ) def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : List[Any] ) -> Optional[int]: # create estimator lowercase_ = self.create_estimator(SCREAMING_SNAKE_CASE_ ) # run training estimator.fit() # result dataframe lowercase_ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis lowercase_ = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] ) lowercase_ = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping lowercase_ = ( Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 9_9_9_9_9_9 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy ) assert 
all(t <= self.results['''eval_loss'''] for t in eval_loss ) # dump tests result into json file to share in PR with open(f'''{estimator.latest_training_job.name}.json''' , '''w''' ) as outfile: json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , SCREAMING_SNAKE_CASE_ )
30
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)


_import_structure = {
    'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
    'processing_trocr': ['TrOCRProcessor'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_trocr'] = [
        'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TrOCRForCausalLM',
        'TrOCRPreTrainedModel',
    ]


if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
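# Usage sketch for the TrOCR components exported above, using the upstream class names
# (the checkpoint id is one of the published TrOCR models; the image path is a placeholder):
from PIL import Image

from transformers import TrOCRProcessor, VisionEncoderDecoderModel

processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")
model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten")

image = Image.open("handwritten_line.png").convert("RGB")  # placeholder input image
pixel_values = processor(images=image, return_tensors="pt").pixel_values
generated_ids = model.generate(pixel_values)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])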
30
1
import operator as op __a = 'scaler.pt' __a = 'pytorch_model' __a = 'random_states' __a = 'optimizer' __a = 'scheduler' __a = 'pytorch_model.bin' __a = 'pytorch_model.bin.index.json' __a = 'model.safetensors' __a = 'model.safetensors.index.json' __a = '1.10.2' __a = 'py38' __a = '4.17.0' __a = ['ml.p3.16xlarge', 'ml.p3dn.24xlarge', 'ml.p4dn.24xlarge'] __a = ['FULL_SHARD', 'SHARD_GRAD_OP', 'NO_SHARD', 'HYBRID_SHARD', 'HYBRID_SHARD_ZERO2'] __a = ['TRANSFORMER_BASED_WRAP', 'SIZE_BASED_WRAP', 'NO_WRAP'] __a = ['BACKWARD_PRE', 'BACKWARD_POST', 'NO_PREFETCH'] __a = ['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT'] __a = '2.0.1' __a = ['pdsh', 'standard', 'openmpi', 'mvapich'] __a = ['default', 'reduce-overhead', 'max-autotune'] __a = {'>': op.gt, '>=': op.ge, '==': op.eq, '!=': op.ne, '<=': op.le, '<': op.lt} # These are the args for `torch.distributed.launch` for pytorch < 1.9 __a = [ 'nnodes', 'nproc_per_node', 'rdzv_backend', 'rdzv_endpoint', 'rdzv_id', 'rdzv_conf', 'standalone', 'max_restarts', 'monitor_interval', 'start_method', 'role', 'module', 'm', 'no_python', 'run_path', 'log_dir', 'r', 'redirects', 't', 'tee', 'node_rank', 'master_addr', 'master_port', ] __a = ['DEEPSPEED', 'MULTI_GPU', 'FSDP', 'MEGATRON_LM'] __a = ['DEEPSPEED', 'MULTI_XPU', 'FSDP']
30
from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig __a = logging.get_logger(__name__) # General docstring __a = 'RegNetConfig' # Base docstring __a = 'facebook/regnet-y-040' __a = [1, 1_0_8_8, 7, 7] # Image classification docstring __a = 'facebook/regnet-y-040' __a = 'tabby, tabby cat' __a = [ 'facebook/regnet-y-040', # See all regnet models at https://huggingface.co/models?filter=regnet ] class lowercase__( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 3 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : Optional[str] = "relu" , **SCREAMING_SNAKE_CASE_ : Any , ) -> List[str]: super().__init__(**SCREAMING_SNAKE_CASE_ ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb lowercase_ = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) lowercase_ = tf.keras.layers.ConvaD( filters=SCREAMING_SNAKE_CASE_ , kernel_size=SCREAMING_SNAKE_CASE_ , strides=SCREAMING_SNAKE_CASE_ , padding='''VALID''' , groups=SCREAMING_SNAKE_CASE_ , use_bias=SCREAMING_SNAKE_CASE_ , name='''convolution''' , ) lowercase_ = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' ) lowercase_ = ACTaFN[activation] if activation is not None else tf.identity def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any: lowercase_ = self.convolution(self.padding(SCREAMING_SNAKE_CASE_ ) ) lowercase_ = self.normalization(SCREAMING_SNAKE_CASE_ ) lowercase_ = self.activation(SCREAMING_SNAKE_CASE_ ) return hidden_state class lowercase__( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : str , SCREAMING_SNAKE_CASE_ : RegNetConfig , **SCREAMING_SNAKE_CASE_ : str ) -> Any: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = config.num_channels lowercase_ = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , ) def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : str ) -> Optional[int]: lowercase_ = shape_list(SCREAMING_SNAKE_CASE_ )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) lowercase_ = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 2, 3, 1) ) lowercase_ = self.embedder(SCREAMING_SNAKE_CASE_ ) return hidden_state class lowercase__( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 2 , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Union[str, Any]: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = tf.keras.layers.ConvaD( filters=SCREAMING_SNAKE_CASE_ , kernel_size=1 , strides=SCREAMING_SNAKE_CASE_ , use_bias=SCREAMING_SNAKE_CASE_ , name='''convolution''' ) lowercase_ = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' ) def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : bool = False ) -> tf.Tensor: return self.normalization(self.convolution(SCREAMING_SNAKE_CASE_ ) , training=SCREAMING_SNAKE_CASE_ ) class lowercase__( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : int ) -> Optional[Any]: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=SCREAMING_SNAKE_CASE_ , name='''pooler''' ) lowercase_ = [ tf.keras.layers.ConvaD(filters=SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation='''relu''' , name='''attention.0''' ), tf.keras.layers.ConvaD(filters=SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ), ] def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Dict: # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels] lowercase_ = self.pooler(SCREAMING_SNAKE_CASE_ ) for layer_module in self.attention: lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ ) lowercase_ = hidden_state * pooled return hidden_state class lowercase__( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : str , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 1 , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> List[str]: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = in_channels != out_channels or stride != 1 lowercase_ = max(1 , out_channels // config.groups_width ) lowercase_ = ( TFRegNetShortCut(SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name='''shortcut''' ) if should_apply_shortcut else tf.keras.layers.Activation('''linear''' , name='''shortcut''' ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
lowercase_ = [ TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ), TFRegNetConvLayer( SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , groups=SCREAMING_SNAKE_CASE_ , activation=config.hidden_act , name='''layer.1''' ), TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE_ , name='''layer.2''' ), ] lowercase_ = ACTaFN[config.hidden_act] def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any: lowercase_ = hidden_state for layer_module in self.layers: lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ ) lowercase_ = self.shortcut(SCREAMING_SNAKE_CASE_ ) hidden_state += residual lowercase_ = self.activation(SCREAMING_SNAKE_CASE_ ) return hidden_state class lowercase__( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : Any , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 1 , **SCREAMING_SNAKE_CASE_ : List[str] ) -> int: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = in_channels != out_channels or stride != 1 lowercase_ = max(1 , out_channels // config.groups_width ) lowercase_ = ( TFRegNetShortCut(SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name='''shortcut''' ) if should_apply_shortcut else tf.keras.layers.Activation('''linear''' , name='''shortcut''' ) ) lowercase_ = [ TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ), TFRegNetConvLayer( SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , groups=SCREAMING_SNAKE_CASE_ , activation=config.hidden_act , name='''layer.1''' ), TFRegNetSELayer(SCREAMING_SNAKE_CASE_ , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ), TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE_ , name='''layer.3''' ), ] lowercase_ = ACTaFN[config.hidden_act] def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : Dict ) -> Optional[Any]: lowercase_ = hidden_state for layer_module in self.layers: lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ ) lowercase_ = self.shortcut(SCREAMING_SNAKE_CASE_ ) hidden_state += residual lowercase_ = self.activation(SCREAMING_SNAKE_CASE_ ) return hidden_state class lowercase__( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : str , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 2 , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> List[str]: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer lowercase_ = [ # downsampling is done in the first layer with stride of 2 layer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name='''layers.0''' ), *[layer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , name=f'''layers.{i+1}''' ) for i in range(depth - 1 )], ] def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : int ) -> int: for layer_module in self.layers: lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ ) return hidden_state class lowercase__( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : RegNetConfig , **SCREAMING_SNAKE_CASE_ : Dict ) -> Tuple: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = [] # based on `downsample_in_first_stage`, the 
first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( SCREAMING_SNAKE_CASE_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) ) lowercase_ = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(SCREAMING_SNAKE_CASE_ , config.depths[1:] ) ): self.stages.append(TFRegNetStage(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , depth=SCREAMING_SNAKE_CASE_ , name=f'''stages.{i+1}''' ) ) def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : bool = True ) -> TFBaseModelOutputWithNoAttention: lowercase_ = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: lowercase_ = hidden_states + (hidden_state,) lowercase_ = stage_module(SCREAMING_SNAKE_CASE_ ) if output_hidden_states: lowercase_ = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=SCREAMING_SNAKE_CASE_ , hidden_states=SCREAMING_SNAKE_CASE_ ) @keras_serializable class lowercase__( tf.keras.layers.Layer ): """simple docstring""" a :str = RegNetConfig def __init__( self : str , SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Any: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = config lowercase_ = TFRegNetEmbeddings(SCREAMING_SNAKE_CASE_ , name='''embedder''' ) lowercase_ = TFRegNetEncoder(SCREAMING_SNAKE_CASE_ , name='''encoder''' ) lowercase_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=SCREAMING_SNAKE_CASE_ , name='''pooler''' ) @unpack_inputs def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : bool = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention: lowercase_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict lowercase_ = self.embedder(SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ ) lowercase_ = self.encoder( SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ ) lowercase_ = encoder_outputs[0] lowercase_ = self.pooler(SCREAMING_SNAKE_CASE_ ) # Change to NCHW output format have uniformity in the modules lowercase_ = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2) ) lowercase_ = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: lowercase_ = tuple([tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=SCREAMING_SNAKE_CASE_ , pooler_output=SCREAMING_SNAKE_CASE_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class lowercase__( UpperCAmelCase ): """simple docstring""" a :Tuple = RegNetConfig a :Any = 'regnet' a :List[str] = 'pixel_values' @property def _lowercase ( self : List[str] ) -> str: return {"pixel_values": tf.TensorSpec(shape=(None, 
self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )} __a = r'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n' __a = r'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( 'The bare RegNet model outputting raw features without any specific head on top.' , UpperCAmelCase , ) class lowercase__( UpperCAmelCase ): """simple docstring""" def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : RegNetConfig , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : str ) -> List[str]: super().__init__(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) lowercase_ = TFRegNetMainLayer(SCREAMING_SNAKE_CASE_ , name='''regnet''' ) @unpack_inputs @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[Any]=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]: lowercase_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict lowercase_ = self.regnet( pixel_values=SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( '\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n ' , UpperCAmelCase , ) class lowercase__( UpperCAmelCase , UpperCAmelCase ): """simple docstring""" def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : RegNetConfig , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : int ) -> Union[str, Any]: super().__init__(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) lowercase_ = config.num_labels lowercase_ = TFRegNetMainLayer(SCREAMING_SNAKE_CASE_ , name='''regnet''' ) # classification head lowercase_ = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : tf.Tensor = None , SCREAMING_SNAKE_CASE_ : tf.Tensor = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : Dict=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: lowercase_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict lowercase_ = self.regnet( SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ ) lowercase_ = outputs.pooler_output if return_dict else outputs[1] lowercase_ = self.classifier[0](SCREAMING_SNAKE_CASE_ ) lowercase_ = self.classifier[1](SCREAMING_SNAKE_CASE_ ) lowercase_ = None if labels is None else self.hf_compute_loss(labels=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_ ) if not return_dict: lowercase_ = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_ , hidden_states=outputs.hidden_states )
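# Usage sketch for the image-classification head defined above, going through the public
# `transformers` API rather than the renamed classes in this file (assumes the upstream
# names `AutoImageProcessor` / `TFRegNetForImageClassification`; the image path is a
# placeholder and the checkpoint id matches the docstring constants above):
import tensorflow as tf
from PIL import Image

from transformers import AutoImageProcessor, TFRegNetForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

image = Image.open("cat.jpg").convert("RGB")  # placeholder input image
inputs = processor(images=image, return_tensors="tf")
outputs = model(**inputs)
predicted_class = int(tf.math.argmax(outputs.logits, axis=-1)[0])
print(model.config.id2label[predicted_class])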
30
1
from io import BytesIO from typing import List, Union import requests from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_decord_available(): import numpy as np from decord import VideoReader if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING __a = logging.get_logger(__name__) @add_end_docstrings(UpperCAmelCase ) class lowercase__( UpperCAmelCase ): """simple docstring""" def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Union[str, Any]: super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) requires_backends(self , '''decord''' ) self.check_model_type(SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int=None , SCREAMING_SNAKE_CASE_ : str=None , SCREAMING_SNAKE_CASE_ : int=None ) -> Union[str, Any]: lowercase_ = {} if frame_sampling_rate is not None: lowercase_ = frame_sampling_rate if num_frames is not None: lowercase_ = num_frames lowercase_ = {} if top_k is not None: lowercase_ = top_k return preprocess_params, {}, postprocess_params def __call__( self : str , SCREAMING_SNAKE_CASE_ : Union[str, List[str]] , **SCREAMING_SNAKE_CASE_ : Dict ) -> List[Any]: return super().__call__(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Any , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1 ) -> int: if num_frames is None: lowercase_ = self.model.config.num_frames if video.startswith('''http://''' ) or video.startswith('''https://''' ): lowercase_ = BytesIO(requests.get(SCREAMING_SNAKE_CASE_ ).content ) lowercase_ = VideoReader(SCREAMING_SNAKE_CASE_ ) videoreader.seek(0 ) lowercase_ = 0 lowercase_ = num_frames * frame_sampling_rate - 1 lowercase_ = np.linspace(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num=SCREAMING_SNAKE_CASE_ , dtype=np.intaa ) lowercase_ = videoreader.get_batch(SCREAMING_SNAKE_CASE_ ).asnumpy() lowercase_ = list(SCREAMING_SNAKE_CASE_ ) lowercase_ = self.image_processor(SCREAMING_SNAKE_CASE_ , return_tensors=self.framework ) return model_inputs def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> str: lowercase_ = self.model(**SCREAMING_SNAKE_CASE_ ) return model_outputs def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any]=5 ) -> Optional[Any]: if top_k > self.model.config.num_labels: lowercase_ = self.model.config.num_labels if self.framework == "pt": lowercase_ = model_outputs.logits.softmax(-1 )[0] lowercase_ , lowercase_ = probs.topk(SCREAMING_SNAKE_CASE_ ) else: raise ValueError(f'''Unsupported framework: {self.framework}''' ) lowercase_ = scores.tolist() lowercase_ = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )]
30
import importlib.metadata import warnings from copy import deepcopy from packaging import version from ..utils import logging from .import_utils import is_accelerate_available, is_bitsandbytes_available if is_bitsandbytes_available(): import bitsandbytes as bnb import torch import torch.nn as nn from ..pytorch_utils import ConvaD if is_accelerate_available(): from accelerate import init_empty_weights from accelerate.utils import find_tied_parameters __a = logging.get_logger(__name__) def a ( snake_case__: Optional[int] , snake_case__: Dict , snake_case__: int , snake_case__: List[str]=None , snake_case__: List[Any]=None ): '''simple docstring''' # Recurse if needed if "." in tensor_name: lowercase_ = tensor_name.split('''.''' ) for split in splits[:-1]: lowercase_ = getattr(snake_case__ , snake_case__ ) if new_module is None: raise ValueError(F'''{module} has no attribute {split}.''' ) lowercase_ = new_module lowercase_ = splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(F'''{module} does not have a parameter or a buffer named {tensor_name}.''' ) lowercase_ = tensor_name in module._buffers lowercase_ = getattr(snake_case__ , snake_case__ ) if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None: raise ValueError(F'''{tensor_name} is on the meta device, we need a `value` to put in on {device}.''' ) lowercase_ = False lowercase_ = False if is_buffer or not is_bitsandbytes_available(): lowercase_ = False lowercase_ = False else: lowercase_ = hasattr(bnb.nn , '''Params4bit''' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit ) lowercase_ = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams ) if is_abit or is_abit: lowercase_ = module._parameters[tensor_name] if param.device.type != "cuda": if value is None: lowercase_ = old_value.to(snake_case__ ) elif isinstance(snake_case__ , torch.Tensor ): lowercase_ = value.to('''cpu''' ) if value.dtype == torch.inta: lowercase_ = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse( '''0.37.2''' ) if not is_abit_serializable: raise ValueError( '''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. ''' '''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' ) else: lowercase_ = torch.tensor(snake_case__ , device='''cpu''' ) # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. 
if issubclass(module.source_cls , snake_case__ ) and fpaa_statistics is None: lowercase_ = new_value.T lowercase_ = old_value.__dict__ if is_abit: lowercase_ = bnb.nn.IntaParams(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ ) elif is_abit: lowercase_ = bnb.nn.Paramsabit(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ ) lowercase_ = new_value if fpaa_statistics is not None: setattr(module.weight , '''SCB''' , fpaa_statistics.to(snake_case__ ) ) else: if value is None: lowercase_ = old_value.to(snake_case__ ) elif isinstance(snake_case__ , torch.Tensor ): lowercase_ = value.to(snake_case__ ) else: lowercase_ = torch.tensor(snake_case__ , device=snake_case__ ) if is_buffer: lowercase_ = new_value else: lowercase_ = nn.Parameter(snake_case__ , requires_grad=old_value.requires_grad ) lowercase_ = new_value def a ( snake_case__: str , snake_case__: Union[str, Any]=None , snake_case__: Any=None , snake_case__: List[str]=None , snake_case__: Optional[Any]=False ): '''simple docstring''' for name, module in model.named_children(): if current_key_name is None: lowercase_ = [] current_key_name.append(snake_case__ ) if (isinstance(snake_case__ , nn.Linear ) or isinstance(snake_case__ , snake_case__ )) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` if not any(key in '''.'''.join(snake_case__ ) for key in modules_to_not_convert ): with init_empty_weights(): if isinstance(snake_case__ , snake_case__ ): lowercase_ , lowercase_ = module.weight.shape else: lowercase_ = module.in_features lowercase_ = module.out_features if quantization_config.quantization_method() == "llm_int8": lowercase_ = bnb.nn.LinearabitLt( snake_case__ , snake_case__ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , ) lowercase_ = True else: if ( quantization_config.llm_inta_skip_modules is not None and name in quantization_config.llm_inta_skip_modules ): pass else: lowercase_ = bnb.nn.Linearabit( snake_case__ , snake_case__ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , ) lowercase_ = True # Store the module class in case we need to transpose the weight later lowercase_ = type(snake_case__ ) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(snake_case__ ) if len(list(module.children() ) ) > 0: lowercase_ , lowercase_ = _replace_with_bnb_linear( snake_case__ , snake_case__ , snake_case__ , snake_case__ , has_been_replaced=snake_case__ , ) # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def a ( snake_case__: Any , snake_case__: Any=None , snake_case__: Union[str, Any]=None , snake_case__: str=None ): '''simple docstring''' lowercase_ = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert lowercase_ , lowercase_ = _replace_with_bnb_linear( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) if not has_been_replaced: logger.warning( '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.''' ''' Please double check your model architecture, or submit an issue on github if you think this is''' ''' a bug.''' ) return model def a ( *snake_case__: str , **snake_case__: Dict ): '''simple docstring''' warnings.warn( 
'''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' , snake_case__ , ) return replace_with_bnb_linear(*snake_case__ , **snake_case__ ) def a ( *snake_case__: Any , **snake_case__: List[Any] ): '''simple docstring''' warnings.warn( '''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' , snake_case__ , ) return set_module_quantized_tensor_to_device(*snake_case__ , **snake_case__ ) def a ( snake_case__: Optional[Any] ): '''simple docstring''' lowercase_ = deepcopy(snake_case__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_model.tie_weights() lowercase_ = find_tied_parameters(snake_case__ ) # For compatibility with Accelerate < 0.18 if isinstance(snake_case__ , snake_case__ ): lowercase_ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: lowercase_ = sum(snake_case__ , [] ) lowercase_ = len(snake_case__ ) > 0 # Check if it is a base model lowercase_ = not hasattr(snake_case__ , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head lowercase_ = list(model.named_children() ) lowercase_ = [list_modules[-1][0]] # add last module together with tied weights lowercase_ = set(snake_case__ ) - set(snake_case__ ) lowercase_ = list(set(snake_case__ ) ) + list(snake_case__ ) # remove ".weight" from the keys lowercase_ = ['''.weight''', '''.bias'''] lowercase_ = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: lowercase_ = name.replace(snake_case__ , '''''' ) filtered_module_names.append(snake_case__ ) return filtered_module_names
30
1
class lowercase__: """simple docstring""" def __init__( self : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None ) -> List[str]: lowercase_ = data lowercase_ = previous lowercase_ = next_node def __str__( self : Tuple ) -> str: return f'''{self.data}''' def _lowercase ( self : Any ) -> int: return self.data def _lowercase ( self : Union[str, Any] ) -> Optional[int]: return self.next def _lowercase ( self : Tuple ) -> Any: return self.previous class lowercase__: """simple docstring""" def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int ) -> str: lowercase_ = head def __iter__( self : Union[str, Any] ) -> str: return self def _lowercase ( self : List[str] ) -> Tuple: if not self.current: raise StopIteration else: lowercase_ = self.current.get_data() lowercase_ = self.current.get_next() return value class lowercase__: """simple docstring""" def __init__( self : Union[str, Any] ) -> Any: lowercase_ = None # First node in list lowercase_ = None # Last node in list def __str__( self : Union[str, Any] ) -> Union[str, Any]: lowercase_ = self.head lowercase_ = [] while current is not None: nodes.append(current.get_data() ) lowercase_ = current.get_next() return " ".join(str(SCREAMING_SNAKE_CASE_ ) for node in nodes ) def __contains__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int ) -> List[Any]: lowercase_ = self.head while current: if current.get_data() == value: return True lowercase_ = current.get_next() return False def __iter__( self : Dict ) -> Tuple: return LinkedListIterator(self.head ) def _lowercase ( self : Any ) -> Optional[Any]: if self.head: return self.head.get_data() return None def _lowercase ( self : Any ) -> Any: if self.tail: return self.tail.get_data() return None def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : Node ) -> None: if self.head is None: lowercase_ = node lowercase_ = node else: self.insert_before_node(self.head , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : Node ) -> None: if self.head is None: self.set_head(SCREAMING_SNAKE_CASE_ ) else: self.insert_after_node(self.tail , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : int ) -> None: lowercase_ = Node(SCREAMING_SNAKE_CASE_ ) if self.head is None: self.set_head(SCREAMING_SNAKE_CASE_ ) else: self.set_tail(SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Node , SCREAMING_SNAKE_CASE_ : Node ) -> None: lowercase_ = node lowercase_ = node.previous if node.get_previous() is None: lowercase_ = node_to_insert else: lowercase_ = node_to_insert lowercase_ = node_to_insert def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : Node , SCREAMING_SNAKE_CASE_ : Node ) -> None: lowercase_ = node lowercase_ = node.next if node.get_next() is None: lowercase_ = node_to_insert else: lowercase_ = node_to_insert lowercase_ = node_to_insert def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> None: lowercase_ = 1 lowercase_ = Node(SCREAMING_SNAKE_CASE_ ) lowercase_ = self.head while node: if current_position == position: self.insert_before_node(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return current_position += 1 lowercase_ = node.next self.insert_after_node(self.tail , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : int ) -> Node: lowercase_ = self.head while node: if node.get_data() == item: return node lowercase_ = node.get_next() raise 
Exception('''Node not found''' ) def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : str ) -> List[Any]: if (node := self.get_node(SCREAMING_SNAKE_CASE_ )) is not None: if node == self.head: lowercase_ = self.head.get_next() if node == self.tail: lowercase_ = self.tail.get_previous() self.remove_node_pointers(SCREAMING_SNAKE_CASE_ ) @staticmethod def _lowercase ( SCREAMING_SNAKE_CASE_ : Node ) -> None: if node.get_next(): lowercase_ = node.previous if node.get_previous(): lowercase_ = node.next lowercase_ = None lowercase_ = None def _lowercase ( self : Tuple ) -> Tuple: return self.head is None def a ( ): '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
30
import argparse import os import re __a = 'src/transformers/models/auto' # re pattern that matches mapping introductions: # SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict __a = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict') # re pattern that matches identifiers in mappings __a = re.compile(r'\s*\(\s*"(\S[^"]+)"') def a ( snake_case__: str , snake_case__: bool = False ): '''simple docstring''' with open(snake_case__ , '''r''' , encoding='''utf-8''' ) as f: lowercase_ = f.read() lowercase_ = content.split('''\n''' ) lowercase_ = [] lowercase_ = 0 while line_idx < len(snake_case__ ): if _re_intro_mapping.search(lines[line_idx] ) is not None: lowercase_ = len(re.search(r'''^(\s*)\S''' , lines[line_idx] ).groups()[0] ) + 8 # Start of a new mapping! while not lines[line_idx].startswith(''' ''' * indent + '''(''' ): new_lines.append(lines[line_idx] ) line_idx += 1 lowercase_ = [] while lines[line_idx].strip() != "]": # Blocks either fit in one line or not if lines[line_idx].strip() == "(": lowercase_ = line_idx while not lines[line_idx].startswith(''' ''' * indent + ''')''' ): line_idx += 1 blocks.append('''\n'''.join(lines[start_idx : line_idx + 1] ) ) else: blocks.append(lines[line_idx] ) line_idx += 1 # Sort blocks by their identifiers lowercase_ = sorted(snake_case__ , key=lambda snake_case__ : _re_identifier.search(snake_case__ ).groups()[0] ) new_lines += blocks else: new_lines.append(lines[line_idx] ) line_idx += 1 if overwrite: with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f: f.write('''\n'''.join(snake_case__ ) ) elif "\n".join(snake_case__ ) != content: return True def a ( snake_case__: bool = False ): '''simple docstring''' lowercase_ = [os.path.join(snake_case__ , snake_case__ ) for f in os.listdir(snake_case__ ) if f.endswith('''.py''' )] lowercase_ = [sort_auto_mapping(snake_case__ , overwrite=snake_case__ ) for fname in fnames] if not overwrite and any(snake_case__ ): lowercase_ = [f for f, d in zip(snake_case__ , snake_case__ ) if d] raise ValueError( F'''The following files have auto mappings that need sorting: {', '.join(snake_case__ )}. Run `make style` to fix''' ''' this.''' ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.') __a = parser.parse_args() sort_all_auto_mappings(not args.check_only)
30
1
from __future__ import annotations class lowercase__: """simple docstring""" def __init__( self : str , SCREAMING_SNAKE_CASE_ : int ) -> None: lowercase_ = data lowercase_ = None lowercase_ = None def a ( snake_case__: Node | None ): # In Order traversal of the tree '''simple docstring''' if tree: display(tree.left ) print(tree.data ) display(tree.right ) def a ( snake_case__: Node | None ): '''simple docstring''' return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0 def a ( snake_case__: Node ): '''simple docstring''' if not tree: return True if tree.left and tree.right: return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right ) else: return not tree.left and not tree.right def a ( ): # Main function for testing. '''simple docstring''' lowercase_ = Node(1 ) lowercase_ = Node(2 ) lowercase_ = Node(3 ) lowercase_ = Node(4 ) lowercase_ = Node(5 ) lowercase_ = Node(6 ) lowercase_ = Node(7 ) lowercase_ = Node(8 ) lowercase_ = Node(9 ) print(is_full_binary_tree(snake_case__ ) ) print(depth_of_tree(snake_case__ ) ) print('''Tree is: ''' ) display(snake_case__ ) if __name__ == "__main__": main()
30
def a ( snake_case__: list , snake_case__: list , snake_case__: int , snake_case__: int , snake_case__: int ): '''simple docstring''' if index == number_of_items: return 0 lowercase_ = 0 lowercase_ = 0 lowercase_ = knapsack(snake_case__ , snake_case__ , snake_case__ , snake_case__ , index + 1 ) if weights[index] <= max_weight: lowercase_ = values[index] + knapsack( snake_case__ , snake_case__ , snake_case__ , max_weight - weights[index] , index + 1 ) return max(snake_case__ , snake_case__ ) if __name__ == "__main__": import doctest doctest.testmod()
30
1
import collections.abc from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_poolformer import PoolFormerConfig __a = logging.get_logger(__name__) # General docstring __a = 'PoolFormerConfig' # Base docstring __a = 'sail/poolformer_s12' __a = [1, 5_1_2, 7, 7] # Image classification docstring __a = 'sail/poolformer_s12' __a = 'tabby, tabby cat' __a = [ 'sail/poolformer_s12', # See all PoolFormer models at https://huggingface.co/models?filter=poolformer ] def a ( snake_case__: List[str] , snake_case__: float = 0.0 , snake_case__: bool = False ): '''simple docstring''' if drop_prob == 0.0 or not training: return input lowercase_ = 1 - drop_prob lowercase_ = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets lowercase_ = keep_prob + torch.rand(snake_case__ , dtype=input.dtype , device=input.device ) random_tensor.floor_() # binarize lowercase_ = input.div(snake_case__ ) * random_tensor return output class lowercase__( nn.Module ): """simple docstring""" def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : Optional[float] = None ) -> None: super().__init__() lowercase_ = drop_prob def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : torch.Tensor ) -> torch.Tensor: return drop_path(SCREAMING_SNAKE_CASE_ , self.drop_prob , self.training ) def _lowercase ( self : str ) -> str: return "p={}".format(self.drop_prob ) class lowercase__( nn.Module ): """simple docstring""" def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Any=None ) -> int: super().__init__() lowercase_ = patch_size if isinstance(SCREAMING_SNAKE_CASE_ , collections.abc.Iterable ) else (patch_size, patch_size) lowercase_ = stride if isinstance(SCREAMING_SNAKE_CASE_ , collections.abc.Iterable ) else (stride, stride) lowercase_ = padding if isinstance(SCREAMING_SNAKE_CASE_ , collections.abc.Iterable ) else (padding, padding) lowercase_ = nn.Convad(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , kernel_size=SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ ) lowercase_ = norm_layer(SCREAMING_SNAKE_CASE_ ) if norm_layer else nn.Identity() def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : int ) -> Optional[Any]: lowercase_ = self.projection(SCREAMING_SNAKE_CASE_ ) lowercase_ = self.norm(SCREAMING_SNAKE_CASE_ ) return embeddings class lowercase__( nn.GroupNorm ): """simple docstring""" def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : int ) -> List[str]: super().__init__(1 , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) class lowercase__( nn.Module ): """simple docstring""" def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int ) -> Optional[int]: super().__init__() lowercase_ = nn.AvgPoolad(SCREAMING_SNAKE_CASE_ , stride=1 , padding=pool_size // 2 , count_include_pad=SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict ) -> List[str]: return 
self.pool(SCREAMING_SNAKE_CASE_ ) - hidden_states class lowercase__( nn.Module ): """simple docstring""" def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> List[str]: super().__init__() lowercase_ = nn.Convad(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 1 ) lowercase_ = nn.Convad(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 1 ) lowercase_ = PoolFormerDropPath(SCREAMING_SNAKE_CASE_ ) if isinstance(config.hidden_act , SCREAMING_SNAKE_CASE_ ): lowercase_ = ACTaFN[config.hidden_act] else: lowercase_ = config.hidden_act def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : Tuple ) -> Optional[int]: lowercase_ = self.conva(SCREAMING_SNAKE_CASE_ ) lowercase_ = self.act_fn(SCREAMING_SNAKE_CASE_ ) lowercase_ = self.drop(SCREAMING_SNAKE_CASE_ ) lowercase_ = self.conva(SCREAMING_SNAKE_CASE_ ) lowercase_ = self.drop(SCREAMING_SNAKE_CASE_ ) return hidden_states class lowercase__( nn.Module ): """simple docstring""" def __init__( self : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[str] ) -> List[Any]: super().__init__() lowercase_ = PoolFormerPooling(SCREAMING_SNAKE_CASE_ ) lowercase_ = PoolFormerOutput(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowercase_ = PoolFormerGroupNorm(SCREAMING_SNAKE_CASE_ ) lowercase_ = PoolFormerGroupNorm(SCREAMING_SNAKE_CASE_ ) # Useful for training neural nets lowercase_ = PoolFormerDropPath(SCREAMING_SNAKE_CASE_ ) if drop_path > 0.0 else nn.Identity() lowercase_ = config.use_layer_scale if config.use_layer_scale: lowercase_ = nn.Parameter( config.layer_scale_init_value * torch.ones((SCREAMING_SNAKE_CASE_) ) , requires_grad=SCREAMING_SNAKE_CASE_ ) lowercase_ = nn.Parameter( config.layer_scale_init_value * torch.ones((SCREAMING_SNAKE_CASE_) ) , requires_grad=SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Dict: if self.use_layer_scale: lowercase_ = self.pooling(self.before_norm(SCREAMING_SNAKE_CASE_ ) ) lowercase_ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output # First residual connection lowercase_ = hidden_states + self.drop_path(SCREAMING_SNAKE_CASE_ ) lowercase_ = () lowercase_ = self.output(self.after_norm(SCREAMING_SNAKE_CASE_ ) ) lowercase_ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output # Second residual connection lowercase_ = hidden_states + self.drop_path(SCREAMING_SNAKE_CASE_ ) lowercase_ = (output,) + outputs return outputs else: lowercase_ = self.drop_path(self.pooling(self.before_norm(SCREAMING_SNAKE_CASE_ ) ) ) # First residual connection lowercase_ = pooling_output + hidden_states lowercase_ = () # Second residual connection inside the PoolFormerOutput block lowercase_ = self.drop_path(self.output(self.after_norm(SCREAMING_SNAKE_CASE_ ) ) ) lowercase_ = hidden_states + layer_output lowercase_ = (output,) + outputs return outputs class lowercase__( nn.Module ): """simple docstring""" def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Dict: super().__init__() lowercase_ = config # stochastic depth decay rule lowercase_ = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )] # patch embeddings lowercase_ = [] for i in range(config.num_encoder_blocks ): 
embeddings.append( PoolFormerEmbeddings( patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) ) lowercase_ = nn.ModuleList(SCREAMING_SNAKE_CASE_ ) # Transformer blocks lowercase_ = [] lowercase_ = 0 for i in range(config.num_encoder_blocks ): # each block consists of layers lowercase_ = [] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i] ): layers.append( PoolFormerLayer( SCREAMING_SNAKE_CASE_ , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) ) blocks.append(nn.ModuleList(SCREAMING_SNAKE_CASE_ ) ) lowercase_ = nn.ModuleList(SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any]=False , SCREAMING_SNAKE_CASE_ : int=True ) -> Dict: lowercase_ = () if output_hidden_states else None lowercase_ = pixel_values for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ): lowercase_ , lowercase_ = layers # Get patch embeddings from hidden_states lowercase_ = embedding_layer(SCREAMING_SNAKE_CASE_ ) # Send the embeddings through the blocks for _, blk in enumerate(SCREAMING_SNAKE_CASE_ ): lowercase_ = blk(SCREAMING_SNAKE_CASE_ ) lowercase_ = layer_outputs[0] if output_hidden_states: lowercase_ = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=SCREAMING_SNAKE_CASE_ , hidden_states=SCREAMING_SNAKE_CASE_ ) class lowercase__( UpperCAmelCase ): """simple docstring""" a :List[str] = PoolFormerConfig a :int = 'poolformer' a :List[Any] = 'pixel_values' a :Union[str, Any] = True def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : Any ) -> List[Any]: if isinstance(SCREAMING_SNAKE_CASE_ , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(SCREAMING_SNAKE_CASE_ , nn.LayerNorm ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[str]=False ) -> Any: if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase_ = value __a = r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' __a = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n' @add_start_docstrings( 'The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.' 
, UpperCAmelCase , ) class lowercase__( UpperCAmelCase ): """simple docstring""" def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : int ) -> Optional[Any]: super().__init__(SCREAMING_SNAKE_CASE_ ) lowercase_ = config lowercase_ = PoolFormerEncoder(SCREAMING_SNAKE_CASE_ ) # Initialize weights and apply final processing self.post_init() def _lowercase ( self : Dict ) -> Any: return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]: lowercase_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('''You have to specify pixel_values''' ) lowercase_ = self.encoder( SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , ) lowercase_ = encoder_outputs[0] if not return_dict: return (sequence_output, None) + encoder_outputs[1:] return BaseModelOutputWithNoAttention( last_hidden_state=SCREAMING_SNAKE_CASE_ , hidden_states=encoder_outputs.hidden_states , ) class lowercase__( nn.Module ): """simple docstring""" def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : List[str] ) -> Any: super().__init__() lowercase_ = nn.Linear(config.hidden_size , config.hidden_size ) def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Union[str, Any]: lowercase_ = self.dense(SCREAMING_SNAKE_CASE_ ) return output @add_start_docstrings( '\n PoolFormer Model transformer with an image classification head on top\n ' , UpperCAmelCase , ) class lowercase__( UpperCAmelCase ): """simple docstring""" def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : str ) -> Union[str, Any]: super().__init__(SCREAMING_SNAKE_CASE_ ) lowercase_ = config.num_labels lowercase_ = PoolFormerModel(SCREAMING_SNAKE_CASE_ ) # Final norm lowercase_ = PoolFormerGroupNorm(config.hidden_sizes[-1] ) # Classifier head lowercase_ = ( nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE_ : Optional[torch.LongTensor] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]: lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict lowercase_ = self.poolformer( SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , ) lowercase_ = outputs[0] lowercase_ = self.classifier(self.norm(SCREAMING_SNAKE_CASE_ ).mean([-2, -1] ) ) lowercase_ = None if labels is not None: if self.config.problem_type is None: 
if self.num_labels == 1: lowercase_ = '''regression''' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): lowercase_ = '''single_label_classification''' else: lowercase_ = '''multi_label_classification''' if self.config.problem_type == "regression": lowercase_ = MSELoss() if self.num_labels == 1: lowercase_ = loss_fct(logits.squeeze() , labels.squeeze() ) else: lowercase_ = loss_fct(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) elif self.config.problem_type == "single_label_classification": lowercase_ = CrossEntropyLoss() lowercase_ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": lowercase_ = BCEWithLogitsLoss() lowercase_ = loss_fct(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if not return_dict: lowercase_ = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_ , hidden_states=outputs.hidden_states )
30
import argparse from collections import defaultdict import yaml __a = 'docs/source/en/_toctree.yml' def a ( snake_case__: Dict ): '''simple docstring''' lowercase_ = defaultdict(snake_case__ ) for doc in model_doc: counts[doc["local"]] += 1 lowercase_ = [key for key, value in counts.items() if value > 1] lowercase_ = [] for duplicate_key in duplicates: lowercase_ = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} ) if len(snake_case__ ) > 1: raise ValueError( F'''{duplicate_key} is present several times in the documentation table of content at ''' '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the ''' '''others.''' ) # Only add this once new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] ) # Sort return sorted(snake_case__ , key=lambda snake_case__ : s["title"].lower() ) def a ( snake_case__: List[Any]=False ): '''simple docstring''' with open(snake_case__ , encoding='''utf-8''' ) as f: lowercase_ = yaml.safe_load(f.read() ) # Get to the API doc lowercase_ = 0 while content[api_idx]["title"] != "API": api_idx += 1 lowercase_ = content[api_idx]['''sections'''] # Then to the model doc lowercase_ = 0 while api_doc[model_idx]["title"] != "Models": model_idx += 1 lowercase_ = api_doc[model_idx]['''sections'''] lowercase_ = [(idx, section) for idx, section in enumerate(snake_case__ ) if '''sections''' in section] lowercase_ = False for idx, modality_doc in modalities_docs: lowercase_ = modality_doc['''sections'''] lowercase_ = clean_model_doc_toc(snake_case__ ) if old_modality_doc != new_modality_doc: lowercase_ = True if overwrite: lowercase_ = new_modality_doc if diff: if overwrite: lowercase_ = model_doc lowercase_ = api_doc with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f: f.write(yaml.dump(snake_case__ , allow_unicode=snake_case__ ) ) else: raise ValueError( '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') __a = parser.parse_args() check_model_doc(args.fix_and_overwrite)
30
1
import warnings from ...utils import logging from .image_processing_videomae import VideoMAEImageProcessor __a = logging.get_logger(__name__) class lowercase__( UpperCAmelCase ): """simple docstring""" def __init__( self : Any , *SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : Dict ) -> None: warnings.warn( '''The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use VideoMAEImageProcessor instead.''' , SCREAMING_SNAKE_CASE_ , ) super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
30
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING __a = logging.get_logger(__name__) class lowercase__( UpperCAmelCase ): """simple docstring""" a :Union[str, Any] = 'upernet' def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : Dict=None , SCREAMING_SNAKE_CASE_ : str=5_1_2 , SCREAMING_SNAKE_CASE_ : Tuple=0.02 , SCREAMING_SNAKE_CASE_ : Optional[Any]=[1, 2, 3, 6] , SCREAMING_SNAKE_CASE_ : Optional[int]=True , SCREAMING_SNAKE_CASE_ : Tuple=0.4 , SCREAMING_SNAKE_CASE_ : Optional[int]=3_8_4 , SCREAMING_SNAKE_CASE_ : str=2_5_6 , SCREAMING_SNAKE_CASE_ : Dict=1 , SCREAMING_SNAKE_CASE_ : Tuple=False , SCREAMING_SNAKE_CASE_ : str=2_5_5 , **SCREAMING_SNAKE_CASE_ : str , ) -> int: super().__init__(**SCREAMING_SNAKE_CASE_ ) if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) lowercase_ = CONFIG_MAPPING['''resnet'''](out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] ) elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase_ = backbone_config.get('''model_type''' ) lowercase_ = CONFIG_MAPPING[backbone_model_type] lowercase_ = config_class.from_dict(SCREAMING_SNAKE_CASE_ ) lowercase_ = backbone_config lowercase_ = hidden_size lowercase_ = initializer_range lowercase_ = pool_scales lowercase_ = use_auxiliary_head lowercase_ = auxiliary_loss_weight lowercase_ = auxiliary_in_channels lowercase_ = auxiliary_channels lowercase_ = auxiliary_num_convs lowercase_ = auxiliary_concat_input lowercase_ = loss_ignore_index def _lowercase ( self : List[str] ) -> List[str]: lowercase_ = copy.deepcopy(self.__dict__ ) lowercase_ = self.backbone_config.to_dict() lowercase_ = self.__class__.model_type return output
30
1
from __future__ import annotations def a ( snake_case__: list , snake_case__: int , snake_case__: int , snake_case__: int ): '''simple docstring''' lowercase_ = [] lowercase_ , lowercase_ = input_list[low:mid], input_list[mid : high + 1] while left and right: result.append((left if left[0] <= right[0] else right).pop(0 ) ) lowercase_ = result + left + right return input_list def a ( snake_case__: list ): '''simple docstring''' if len(snake_case__ ) <= 1: return input_list lowercase_ = list(snake_case__ ) # iteration for two-way merging lowercase_ = 2 while p <= len(snake_case__ ): # getting low, high and middle value for merge-sort of single list for i in range(0 , len(snake_case__ ) , snake_case__ ): lowercase_ = i lowercase_ = i + p - 1 lowercase_ = (low + high + 1) // 2 lowercase_ = merge(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # final merge of last two parts if p * 2 >= len(snake_case__ ): lowercase_ = i lowercase_ = merge(snake_case__ , 0 , snake_case__ , len(snake_case__ ) - 1 ) break p *= 2 return input_list if __name__ == "__main__": __a = input('Enter numbers separated by a comma:\n').strip() if user_input == "": __a = [] else: __a = [int(item.strip()) for item in user_input.split(',')] print(iter_merge_sort(unsorted))
30
import tempfile import unittest from make_student import create_student_by_copying_alternating_layers from transformers import AutoConfig from transformers.file_utils import cached_property from transformers.testing_utils import require_torch __a = 'sshleifer/bart-tiny-random' __a = 'patrickvonplaten/t5-tiny-random' @require_torch class lowercase__( unittest.TestCase ): """simple docstring""" @cached_property def _lowercase ( self : Any ) -> Tuple: return AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : int ) -> List[Any]: lowercase_ , *lowercase_ = create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.num_hidden_layers , 1 ) def _lowercase ( self : Union[str, Any] ) -> Optional[int]: lowercase_ , *lowercase_ = create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=1 , d=SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : List[str] ) -> List[Any]: lowercase_ , *lowercase_ = create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=1 , d=SCREAMING_SNAKE_CASE_ ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers ) def _lowercase ( self : List[Any] ) -> List[Any]: lowercase_ , *lowercase_ = create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , 1 ) def _lowercase ( self : Union[str, Any] ) -> Optional[int]: with self.assertRaises(SCREAMING_SNAKE_CASE_ ): create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=SCREAMING_SNAKE_CASE_ , d=SCREAMING_SNAKE_CASE_ )
30
1
def a ( snake_case__: float , snake_case__: int ): '''simple docstring''' if digit_amount > 0: return round(number - int(snake_case__ ) , snake_case__ ) return number - int(snake_case__ ) if __name__ == "__main__": print(decimal_isolate(1.53, 0)) print(decimal_isolate(35.345, 1)) print(decimal_isolate(35.345, 2)) print(decimal_isolate(35.345, 3)) print(decimal_isolate(-14.789, 3)) print(decimal_isolate(0, 2)) print(decimal_isolate(-14.123, 1)) print(decimal_isolate(-14.123, 2)) print(decimal_isolate(-14.123, 3))
30
def a ( snake_case__: int = 100 ): '''simple docstring''' lowercase_ = (n * (n + 1) // 2) ** 2 lowercase_ = n * (n + 1) * (2 * n + 1) // 6 return sum_cubes - sum_squares if __name__ == "__main__": print(f"{solution() = }")
30
1
import argparse import json import os import re import shutil import torch from transformers import BioGptConfig, BioGptForCausalLM from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() __a = 2 class lowercase__: """simple docstring""" def __init__( self : Union[str, Any] , *, # begin keyword-only arguments SCREAMING_SNAKE_CASE_ : Optional[int]="<s>" , SCREAMING_SNAKE_CASE_ : Optional[int]="<pad>" , SCREAMING_SNAKE_CASE_ : List[Any]="</s>" , SCREAMING_SNAKE_CASE_ : Tuple="<unk>" , SCREAMING_SNAKE_CASE_ : List[str]=None , ) -> Optional[int]: lowercase_ , lowercase_ , lowercase_ , lowercase_ = bos, unk, pad, eos lowercase_ = [] lowercase_ = [] lowercase_ = {} lowercase_ = self.add_symbol(SCREAMING_SNAKE_CASE_ ) lowercase_ = self.add_symbol(SCREAMING_SNAKE_CASE_ ) lowercase_ = self.add_symbol(SCREAMING_SNAKE_CASE_ ) lowercase_ = self.add_symbol(SCREAMING_SNAKE_CASE_ ) if extra_special_symbols: for s in extra_special_symbols: self.add_symbol(SCREAMING_SNAKE_CASE_ ) lowercase_ = len(self.symbols ) def __eq__( self : List[Any] , SCREAMING_SNAKE_CASE_ : int ) -> List[Any]: return self.indices == other.indices def __getitem__( self : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Dict: if idx < len(self.symbols ): return self.symbols[idx] return self.unk_word def __len__( self : int ) -> int: return len(self.symbols ) def __contains__( self : List[Any] , SCREAMING_SNAKE_CASE_ : Any ) -> Optional[Any]: return sym in self.indices @classmethod def _lowercase ( cls : str , SCREAMING_SNAKE_CASE_ : Any ) -> Optional[int]: lowercase_ = cls() d.add_from_file(SCREAMING_SNAKE_CASE_ ) return d def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int=1 , SCREAMING_SNAKE_CASE_ : Tuple=False ) -> Optional[Any]: if word in self.indices and not overwrite: lowercase_ = self.indices[word] lowercase_ = self.count[idx] + n return idx else: lowercase_ = len(self.symbols ) lowercase_ = idx self.symbols.append(SCREAMING_SNAKE_CASE_ ) self.count.append(SCREAMING_SNAKE_CASE_ ) return idx def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Tuple: return 0 def _lowercase ( self : Any , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Tuple: if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): try: with open(SCREAMING_SNAKE_CASE_ , '''r''' , encoding='''utf-8''' ) as fd: self.add_from_file(SCREAMING_SNAKE_CASE_ ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception('''Incorrect encoding detected in {}, please rebuild the dataset'''.format(SCREAMING_SNAKE_CASE_ ) ) return lowercase_ = f.readlines() lowercase_ = self._load_meta(SCREAMING_SNAKE_CASE_ ) for line in lines[indices_start_line:]: try: lowercase_ , lowercase_ = line.rstrip().rsplit(''' ''' , 1 ) if field == "#fairseq:overwrite": lowercase_ = True lowercase_ , lowercase_ = line.rsplit(''' ''' , 1 ) else: lowercase_ = False lowercase_ = int(SCREAMING_SNAKE_CASE_ ) lowercase_ = line if word in self and not overwrite: raise RuntimeError( '''Duplicate word found when loading Dictionary: \'{}\'. ''' '''Duplicate words can overwrite earlier ones by adding the ''' '''#fairseq:overwrite flag at the end of the corresponding row ''' '''in the dictionary file. 
If using the Camembert model, please ''' '''download an updated copy of the model file.'''.format(SCREAMING_SNAKE_CASE_ ) ) self.add_symbol(SCREAMING_SNAKE_CASE_ , n=SCREAMING_SNAKE_CASE_ , overwrite=SCREAMING_SNAKE_CASE_ ) except ValueError: raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt> [flags]\'''' ) def a ( snake_case__: Tuple ): '''simple docstring''' # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up, # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7} lowercase_ = dict((re.sub(r'''@@$''' , '''''' , snake_case__ ), v) if k.endswith('''@@''' ) else (re.sub(r'''$''' , '''</w>''' , snake_case__ ), v) for k, v in d.items() ) lowercase_ = '''<s> <pad> </s> <unk>'''.split() # restore the special tokens for k in keep_keys: del da[F'''{k}</w>'''] lowercase_ = d[k] # restore return da def a ( snake_case__: Union[str, Any] , snake_case__: Tuple ): '''simple docstring''' # prep if not os.path.exists(snake_case__ ): raise ValueError(F'''path {biogpt_checkpoint_path} does not exist!''' ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) print(F'''Writing results to {pytorch_dump_folder_path}''' ) # handle various types of models lowercase_ = os.path.join(snake_case__ , '''checkpoint.pt''' ) if not os.path.isfile(snake_case__ ): raise ValueError(F'''path to the file {checkpoint_file} does not exist!''' ) lowercase_ = torch.load(snake_case__ , map_location='''cpu''' ) lowercase_ = chkpt['''cfg''']['''model'''] # dicts lowercase_ = os.path.join(snake_case__ , '''dict.txt''' ) if not os.path.isfile(snake_case__ ): raise ValueError(F'''path to the file {dict_file} does not exist!''' ) lowercase_ = Dictionary.load(snake_case__ ) lowercase_ = rewrite_dict_keys(src_dict.indices ) lowercase_ = len(snake_case__ ) lowercase_ = os.path.join(snake_case__ , VOCAB_FILES_NAMES['''vocab_file'''] ) print(F'''Generating {src_vocab_file} of {src_vocab_size} records''' ) with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(snake_case__ , ensure_ascii=snake_case__ , indent=snake_case__ ) ) # merges_file (bpecodes) lowercase_ = os.path.join(snake_case__ , '''bpecodes''' ) if not os.path.isfile(snake_case__ ): raise ValueError(F'''path to the file {bpecodes_file} does not exist!''' ) lowercase_ = os.path.join(snake_case__ , VOCAB_FILES_NAMES['''merges_file'''] ) shutil.copyfile(snake_case__ , snake_case__ ) # model config lowercase_ = os.path.join(snake_case__ , '''config.json''' ) lowercase_ = { '''activation_dropout''': args['''activation_dropout'''], '''architectures''': ['''BioGptForCausalLM'''], '''attention_probs_dropout_prob''': args['''attention_dropout'''], '''bos_token_id''': 0, '''eos_token_id''': 2, '''hidden_act''': args['''activation_fn'''], '''hidden_dropout_prob''': args['''dropout'''], '''hidden_size''': args['''decoder_embed_dim'''], '''initializer_range''': 0.0_2, '''intermediate_size''': args['''decoder_ffn_embed_dim'''], '''layer_norm_eps''': 1e-1_2, '''layerdrop''': args['''decoder_layerdrop'''], '''max_position_embeddings''': args['''max_target_positions'''], '''model_type''': '''biogpt''', '''num_attention_heads''': args['''decoder_attention_heads'''], '''num_hidden_layers''': args['''decoder_layers'''], '''pad_token_id''': 1, '''scale_embedding''': not args['''no_scale_embedding'''], '''tie_word_embeddings''': args['''share_decoder_input_output_embed'''], '''vocab_size''': src_vocab_size, } # good hparam defaults to start with print(F'''Generating 
{biogpt_model_config_file}''' ) with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(snake_case__ , ensure_ascii=snake_case__ , indent=snake_case__ ) ) # tokenizer config lowercase_ = os.path.join(snake_case__ , snake_case__ ) lowercase_ = { '''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''model_max_length''': 1_024, '''pad_token''': '''<pad>''', '''special_tokens_map_file''': None, '''tokenizer_class''': '''BioGptTokenizer''', '''unk_token''': '''<unk>''', } print(F'''Generating {biogpt_tokenizer_config_file}''' ) with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(snake_case__ , ensure_ascii=snake_case__ , indent=snake_case__ ) ) # model lowercase_ = chkpt['''model'''] # remove unneeded keys lowercase_ = [ '''decoder.version''', ] for k in ignore_keys: model_state_dict.pop(snake_case__ , snake_case__ ) lowercase_ = list(model_state_dict.keys() ) for layer_name in layer_names: if layer_name.endswith('''output_projection.weight''' ): lowercase_ = model_state_dict.pop(snake_case__ ) else: lowercase_ = model_state_dict.pop(snake_case__ ) lowercase_ = BioGptConfig.from_pretrained(snake_case__ ) lowercase_ = BioGptForCausalLM(snake_case__ ) # check that it loads ok model_new.load_state_dict(snake_case__ ) # save lowercase_ = os.path.join(snake_case__ , snake_case__ ) print(F'''Generating {pytorch_weights_dump_path}''' ) torch.save(snake_case__ , snake_case__ ) print('''Conversion is done!''' ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--biogpt_checkpoint_path', default=None, type=str, required=True, help=( 'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,' ' bpecodes, etc.' ), ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) __a = parser.parse_args() convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
30
import logging from transformers.configuration_utils import PretrainedConfig __a = logging.getLogger(__name__) class lowercase__( UpperCAmelCase ): """simple docstring""" a :Optional[int] = 'masked_bert' def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any]=3_0_5_2_2 , SCREAMING_SNAKE_CASE_ : List[str]=7_6_8 , SCREAMING_SNAKE_CASE_ : Optional[int]=1_2 , SCREAMING_SNAKE_CASE_ : Any=1_2 , SCREAMING_SNAKE_CASE_ : str=3_0_7_2 , SCREAMING_SNAKE_CASE_ : Union[str, Any]="gelu" , SCREAMING_SNAKE_CASE_ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE_ : List[str]=0.1 , SCREAMING_SNAKE_CASE_ : Tuple=5_1_2 , SCREAMING_SNAKE_CASE_ : str=2 , SCREAMING_SNAKE_CASE_ : Dict=0.02 , SCREAMING_SNAKE_CASE_ : Any=1e-12 , SCREAMING_SNAKE_CASE_ : Any=0 , SCREAMING_SNAKE_CASE_ : Optional[int]="topK" , SCREAMING_SNAKE_CASE_ : Dict="constant" , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.0 , **SCREAMING_SNAKE_CASE_ : Optional[int] , ) -> Optional[Any]: super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) lowercase_ = vocab_size lowercase_ = hidden_size lowercase_ = num_hidden_layers lowercase_ = num_attention_heads lowercase_ = hidden_act lowercase_ = intermediate_size lowercase_ = hidden_dropout_prob lowercase_ = attention_probs_dropout_prob lowercase_ = max_position_embeddings lowercase_ = type_vocab_size lowercase_ = initializer_range lowercase_ = layer_norm_eps lowercase_ = pruning_method lowercase_ = mask_init lowercase_ = mask_scale
30
1
from typing import List, Union import numpy as np from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING __a = logging.get_logger(__name__) @add_end_docstrings(UpperCAmelCase ) class lowercase__( UpperCAmelCase ): """simple docstring""" def __init__( self : int , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : str ) -> Optional[int]: super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) requires_backends(self , '''vision''' ) self.check_model_type(SCREAMING_SNAKE_CASE_ ) def __call__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **SCREAMING_SNAKE_CASE_ : Dict ) -> Optional[Any]: return super().__call__(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Optional[Any] , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> Any: return {}, {}, {} def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : List[Any] ) -> str: lowercase_ = load_image(SCREAMING_SNAKE_CASE_ ) lowercase_ = image.size lowercase_ = self.image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors=self.framework ) return model_inputs def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Dict: lowercase_ = self.model(**SCREAMING_SNAKE_CASE_ ) return model_outputs def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any: lowercase_ = model_outputs.predicted_depth lowercase_ = torch.nn.functional.interpolate( predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='''bicubic''' , align_corners=SCREAMING_SNAKE_CASE_ ) lowercase_ = prediction.squeeze().cpu().numpy() lowercase_ = (output * 2_5_5 / np.max(SCREAMING_SNAKE_CASE_ )).astype('''uint8''' ) lowercase_ = Image.fromarray(SCREAMING_SNAKE_CASE_ ) lowercase_ = {} lowercase_ = predicted_depth lowercase_ = depth return output_dict
30
import os def a ( ): '''simple docstring''' lowercase_ = os.path.join(os.path.dirname(snake_case__ ) , '''num.txt''' ) with open(snake_case__ ) as file_hand: return str(sum(int(snake_case__ ) for line in file_hand ) )[:10] if __name__ == "__main__": print(solution())
30
1
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from argparse import ArgumentParser from accelerate.commands.config import get_config_parser from accelerate.commands.env import env_command_parser from accelerate.commands.launch import launch_command_parser from accelerate.commands.test import test_command_parser from accelerate.commands.tpu import tpu_command_parser def a ( ): '''simple docstring''' lowercase_ = ArgumentParser('''Accelerate CLI tool''' , usage='''accelerate <command> [<args>]''' , allow_abbrev=snake_case__ ) lowercase_ = parser.add_subparsers(help='''accelerate command helpers''' ) # Register commands get_config_parser(subparsers=snake_case__ ) env_command_parser(subparsers=snake_case__ ) launch_command_parser(subparsers=snake_case__ ) tpu_command_parser(subparsers=snake_case__ ) test_command_parser(subparsers=snake_case__ ) # Let's go lowercase_ = parser.parse_args() if not hasattr(snake_case__ , '''func''' ): parser.print_help() exit(1 ) # Run args.func(snake_case__ ) if __name__ == "__main__": main()
30
import os import zipfile import pytest from datasets.utils.extract import ( BzipaExtractor, Extractor, GzipExtractor, LzaExtractor, SevenZipExtractor, TarExtractor, XzExtractor, ZipExtractor, ZstdExtractor, ) from .utils import require_lza, require_pyazr, require_zstandard @pytest.mark.parametrize( '''compression_format, is_archive''' , [ ('''7z''', True), ('''bz2''', False), ('''gzip''', False), ('''lz4''', False), ('''tar''', True), ('''xz''', False), ('''zip''', True), ('''zstd''', False), ] , ) def a ( snake_case__: int , snake_case__: Tuple , snake_case__: Dict , snake_case__: Dict , snake_case__: List[Any] , snake_case__: int , snake_case__: List[Any] , snake_case__: Optional[int] , snake_case__: str , snake_case__: Union[str, Any] , snake_case__: List[str] , snake_case__: int , ): '''simple docstring''' lowercase_ = { '''7z''': (seven_zip_file, SevenZipExtractor), '''bz2''': (bza_file, BzipaExtractor), '''gzip''': (gz_file, GzipExtractor), '''lz4''': (lza_file, LzaExtractor), '''tar''': (tar_file, TarExtractor), '''xz''': (xz_file, XzExtractor), '''zip''': (zip_file, ZipExtractor), '''zstd''': (zstd_file, ZstdExtractor), } lowercase_ , lowercase_ = input_paths_and_base_extractors[compression_format] if input_path is None: lowercase_ = F'''for \'{compression_format}\' compression_format, ''' if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(snake_case__ ) assert base_extractor.is_extractable(snake_case__ ) lowercase_ = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''') base_extractor.extract(snake_case__ , snake_case__ ) if is_archive: assert output_path.is_dir() for file_path in output_path.iterdir(): assert file_path.name == text_file.name lowercase_ = file_path.read_text(encoding='''utf-8''' ) else: lowercase_ = output_path.read_text(encoding='''utf-8''' ) lowercase_ = text_file.read_text(encoding='''utf-8''' ) assert extracted_file_content == expected_file_content @pytest.mark.parametrize( '''compression_format, is_archive''' , [ ('''7z''', True), ('''bz2''', False), ('''gzip''', False), ('''lz4''', False), ('''tar''', True), ('''xz''', False), ('''zip''', True), ('''zstd''', False), ] , ) def a ( snake_case__: List[Any] , snake_case__: int , snake_case__: Optional[int] , snake_case__: Union[str, Any] , snake_case__: List[Any] , snake_case__: Tuple , snake_case__: Optional[int] , snake_case__: List[str] , snake_case__: Union[str, Any] , snake_case__: Tuple , snake_case__: int , snake_case__: Optional[int] , ): '''simple docstring''' lowercase_ = { '''7z''': seven_zip_file, '''bz2''': bza_file, '''gzip''': gz_file, '''lz4''': lza_file, '''tar''': tar_file, '''xz''': xz_file, '''zip''': zip_file, '''zstd''': zstd_file, } lowercase_ = input_paths[compression_format] if input_path is None: lowercase_ = F'''for \'{compression_format}\' compression_format, ''' if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(snake_case__ ) lowercase_ = Extractor.infer_extractor_format(snake_case__ ) assert extractor_format is not None lowercase_ = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''') Extractor.extract(snake_case__ , snake_case__ , snake_case__ ) if is_archive: assert output_path.is_dir() 
for file_path in output_path.iterdir(): assert file_path.name == text_file.name lowercase_ = file_path.read_text(encoding='''utf-8''' ) else: lowercase_ = output_path.read_text(encoding='''utf-8''' ) lowercase_ = text_file.read_text(encoding='''utf-8''' ) assert extracted_file_content == expected_file_content @pytest.fixture def a ( snake_case__: Union[str, Any] , snake_case__: List[Any] ): '''simple docstring''' import tarfile lowercase_ = tmp_path / '''data_dot_dot''' directory.mkdir() lowercase_ = directory / '''tar_file_with_dot_dot.tar''' with tarfile.TarFile(snake_case__ , '''w''' ) as f: f.add(snake_case__ , arcname=os.path.join('''..''' , text_file.name ) ) return path @pytest.fixture def a ( snake_case__: int ): '''simple docstring''' import tarfile lowercase_ = tmp_path / '''data_sym_link''' directory.mkdir() lowercase_ = directory / '''tar_file_with_sym_link.tar''' os.symlink('''..''' , directory / '''subdir''' , target_is_directory=snake_case__ ) with tarfile.TarFile(snake_case__ , '''w''' ) as f: f.add(str(directory / '''subdir''' ) , arcname='''subdir''' ) # str required by os.readlink on Windows and Python < 3.8 return path @pytest.mark.parametrize( '''insecure_tar_file, error_log''' , [('''tar_file_with_dot_dot''', '''illegal path'''), ('''tar_file_with_sym_link''', '''Symlink''')] , ) def a ( snake_case__: List[Any] , snake_case__: Optional[int] , snake_case__: List[str] , snake_case__: List[str] , snake_case__: int , snake_case__: Optional[Any] ): '''simple docstring''' lowercase_ = { '''tar_file_with_dot_dot''': tar_file_with_dot_dot, '''tar_file_with_sym_link''': tar_file_with_sym_link, } lowercase_ = insecure_tar_files[insecure_tar_file] lowercase_ = tmp_path / '''extracted''' TarExtractor.extract(snake_case__ , snake_case__ ) assert caplog.text for record in caplog.records: assert record.levelname == "ERROR" assert error_log in record.msg def a ( snake_case__: Optional[int] ): '''simple docstring''' # We should have less false positives than zipfile.is_zipfile # We do that by checking only the magic number lowercase_ = tmpdir / '''not_a_zip_file''' # From: https://github.com/python/cpython/pull/5053 lowercase_ = ( B'''\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00''' B'''\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I''' B'''DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07''' B'''\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82''' ) with not_a_zip_file.open('''wb''' ) as f: f.write(snake_case__ ) assert zipfile.is_zipfile(str(snake_case__ ) ) # is a false positive for `zipfile` assert not ZipExtractor.is_extractable(snake_case__ ) # but we're right
30
1
from ...configuration_utils import PretrainedConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { 's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json', } class lowercase__( UpperCAmelCase ): """simple docstring""" a :Optional[Any] = 'open-llama' def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Tuple=1_0_0_0_0_0 , SCREAMING_SNAKE_CASE_ : Optional[int]=4_0_9_6 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1_1_0_0_8 , SCREAMING_SNAKE_CASE_ : List[Any]=3_2 , SCREAMING_SNAKE_CASE_ : Dict=3_2 , SCREAMING_SNAKE_CASE_ : Optional[Any]="silu" , SCREAMING_SNAKE_CASE_ : List[Any]=2_0_4_8 , SCREAMING_SNAKE_CASE_ : List[str]=0.02 , SCREAMING_SNAKE_CASE_ : List[str]=1e-6 , SCREAMING_SNAKE_CASE_ : Dict=True , SCREAMING_SNAKE_CASE_ : int=0 , SCREAMING_SNAKE_CASE_ : Optional[Any]=1 , SCREAMING_SNAKE_CASE_ : Tuple=2 , SCREAMING_SNAKE_CASE_ : List[str]=False , SCREAMING_SNAKE_CASE_ : List[str]=True , SCREAMING_SNAKE_CASE_ : Tuple=0.1 , SCREAMING_SNAKE_CASE_ : int=0.1 , SCREAMING_SNAKE_CASE_ : Dict=True , SCREAMING_SNAKE_CASE_ : Any=True , SCREAMING_SNAKE_CASE_ : Dict=None , **SCREAMING_SNAKE_CASE_ : Tuple , ) -> Any: lowercase_ = vocab_size lowercase_ = max_position_embeddings lowercase_ = hidden_size lowercase_ = intermediate_size lowercase_ = num_hidden_layers lowercase_ = num_attention_heads lowercase_ = hidden_act lowercase_ = initializer_range lowercase_ = rms_norm_eps lowercase_ = use_cache lowercase_ = kwargs.pop( '''use_memorry_efficient_attention''' , SCREAMING_SNAKE_CASE_ ) lowercase_ = hidden_dropout_prob lowercase_ = attention_dropout_prob lowercase_ = use_stable_embedding lowercase_ = shared_input_output_embedding lowercase_ = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , tie_word_embeddings=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) def _lowercase ( self : Dict ) -> Tuple: if self.rope_scaling is None: return if not isinstance(self.rope_scaling , SCREAMING_SNAKE_CASE_ ) or len(self.rope_scaling ) != 2: raise ValueError( '''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, ''' f'''got {self.rope_scaling}''' ) lowercase_ = self.rope_scaling.get('''type''' , SCREAMING_SNAKE_CASE_ ) lowercase_ = self.rope_scaling.get('''factor''' , SCREAMING_SNAKE_CASE_ ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or rope_scaling_factor <= 1.0: raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
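As a hedged illustration of the rope_scaling validation above, a dictionary of this shape should pass it: exactly two fields, a type of "linear" or "dynamic", and a float factor greater than 1.0.

rope_scaling = {"type": "dynamic", "factor": 2.0}   # example values, not defaults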
30
from __future__ import annotations def a ( snake_case__: list[int] , snake_case__: int , snake_case__: int , snake_case__: int ): '''simple docstring''' if (direction == 1 and array[indexa] > array[indexa]) or ( direction == 0 and array[indexa] < array[indexa] ): lowercase_ , lowercase_ = array[indexa], array[indexa] def a ( snake_case__: list[int] , snake_case__: int , snake_case__: int , snake_case__: int ): '''simple docstring''' if length > 1: lowercase_ = int(length / 2 ) for i in range(snake_case__ , low + middle ): comp_and_swap(snake_case__ , snake_case__ , i + middle , snake_case__ ) bitonic_merge(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) bitonic_merge(snake_case__ , low + middle , snake_case__ , snake_case__ ) def a ( snake_case__: list[int] , snake_case__: int , snake_case__: int , snake_case__: int ): '''simple docstring''' if length > 1: lowercase_ = int(length / 2 ) bitonic_sort(snake_case__ , snake_case__ , snake_case__ , 1 ) bitonic_sort(snake_case__ , low + middle , snake_case__ , 0 ) bitonic_merge(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) if __name__ == "__main__": __a = input('Enter numbers separated by a comma:\n').strip() __a = [int(item.strip()) for item in user_input.split(',')] bitonic_sort(unsorted, 0, len(unsorted), 1) print('\nSorted array in ascending order is: ', end='') print(*unsorted, sep=', ') bitonic_merge(unsorted, 0, len(unsorted), 0) print('Sorted array in descending order is: ', end='') print(*unsorted, sep=', ')
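A short usage note for the bitonic sort above, assuming the helpers are bound to the names used in its __main__ block: the classic compare-and-swap network expects a power-of-two input length, so a sanity check might look like this (example data only).

data = [12, 42, -21, 17, 23, 18, 9, -5]           # len(data) == 8, a power of two
bitonic_sort(data, 0, len(data), 1)               # direction 1 sorts ascending
assert data == sorted([12, 42, -21, 17, 23, 18, 9, -5])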
30
1
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import ( BitConfig, ViTHybridConfig, ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel, ) from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() __a = logging.get_logger(__name__) def a ( snake_case__: Optional[Any] , snake_case__: Any=False ): '''simple docstring''' lowercase_ = [] # fmt: off # stem: rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token''') ) rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings''') ) rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight''') ) rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias''') ) # backbone rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight''') ) rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight''') ) rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias''') ) for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') ) 
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') ) # transformer encoder for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') ) if base_model: # layernorm + pooler rename_keys.extend( [ ('''norm.weight''', '''layernorm.weight'''), ('''norm.bias''', '''layernorm.bias'''), ('''pre_logits.fc.weight''', '''pooler.dense.weight'''), ('''pre_logits.fc.bias''', '''pooler.dense.bias'''), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" lowercase_ = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('''norm.weight''', '''vit.layernorm.weight'''), ('''norm.bias''', '''vit.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ] ) # fmt: on return rename_keys def a ( snake_case__: Union[str, Any] , snake_case__: Optional[int] , snake_case__: Optional[int]=False ): '''simple docstring''' for i in range(config.num_hidden_layers ): if base_model: lowercase_ = '''''' else: lowercase_ = '''vit.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowercase_ = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' ) lowercase_ = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict lowercase_ = in_proj_weight[ : config.hidden_size, : ] lowercase_ = in_proj_bias[: config.hidden_size] lowercase_ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowercase_ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowercase_ = in_proj_weight[ -config.hidden_size :, : ] lowercase_ = in_proj_bias[-config.hidden_size :] def a ( snake_case__: Tuple ): '''simple docstring''' lowercase_ = 
['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(snake_case__ , snake_case__ ) def a ( snake_case__: Optional[Any] , snake_case__: int , snake_case__: List[Any] ): '''simple docstring''' lowercase_ = dct.pop(snake_case__ ) lowercase_ = val def a ( ): '''simple docstring''' lowercase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowercase_ = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ) return im @torch.no_grad() def a ( snake_case__: List[str] , snake_case__: Optional[Any] , snake_case__: Tuple=False ): '''simple docstring''' lowercase_ = BitConfig( global_padding='''same''' , layer_type='''bottleneck''' , depths=(3, 4, 9) , out_features=['''stage3'''] , embedding_dynamic_padding=snake_case__ , ) lowercase_ = ViTHybridConfig(backbone_config=snake_case__ , image_size=384 , num_labels=1_000 ) lowercase_ = False # load original model from timm lowercase_ = timm.create_model(snake_case__ , pretrained=snake_case__ ) timm_model.eval() # load state_dict of original model, remove and rename some keys lowercase_ = timm_model.state_dict() if base_model: remove_classification_head_(snake_case__ ) lowercase_ = create_rename_keys(snake_case__ , snake_case__ ) for src, dest in rename_keys: rename_key(snake_case__ , snake_case__ , snake_case__ ) read_in_q_k_v(snake_case__ , snake_case__ , snake_case__ ) lowercase_ = '''huggingface/label-files''' lowercase_ = '''imagenet-1k-id2label.json''' lowercase_ = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='''dataset''' ) , '''r''' ) ) lowercase_ = {int(snake_case__ ): v for k, v in idalabel.items()} lowercase_ = idalabel lowercase_ = {v: k for k, v in idalabel.items()} # load HuggingFace model if vit_name[-5:] == "in21k": lowercase_ = ViTHybridModel(snake_case__ ).eval() else: lowercase_ = ViTHybridForImageClassification(snake_case__ ).eval() model.load_state_dict(snake_case__ ) # create image processor lowercase_ = create_transform(**resolve_data_config({} , model=snake_case__ ) ) lowercase_ = transform.transforms lowercase_ = { '''bilinear''': PILImageResampling.BILINEAR, '''bicubic''': PILImageResampling.BICUBIC, '''nearest''': PILImageResampling.NEAREST, } lowercase_ = ViTHybridImageProcessor( do_resize=snake_case__ , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=snake_case__ , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=snake_case__ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) lowercase_ = prepare_img() lowercase_ = transform(snake_case__ ).unsqueeze(0 ) lowercase_ = processor(snake_case__ , return_tensors='''pt''' ).pixel_values # verify pixel values assert torch.allclose(snake_case__ , snake_case__ ) # verify logits with torch.no_grad(): lowercase_ = model(snake_case__ ) lowercase_ = outputs.logits print('''Predicted class:''' , logits.argmax(-1 ).item() ) if base_model: lowercase_ = timm_model.forward_features(snake_case__ ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(snake_case__ , outputs.pooler_output , atol=1e-3 ) else: lowercase_ = timm_model(snake_case__ ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(snake_case__ , outputs.logits , atol=1e-3 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) print(F'''Saving model {vit_name} 
to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case__ ) print(F'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(snake_case__ ) if push_to_hub: print(F'''Pushing model and processor to the hub {vit_name}''' ) model.push_to_hub(F'''ybelkada/{vit_name}''' ) processor.push_to_hub(F'''ybelkada/{vit_name}''' ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--vit_name', default='vit_base_r50_s16_384', type=str, help='Name of the hybrid ViT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.' ) __a = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
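The q/k/v handling in the conversion script above just slices timm's fused projection into three equal blocks; a small self-contained illustration of that slicing with toy sizes, not the real checkpoint:

import torch
hidden = 4
qkv = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
q, k, v = qkv[:hidden], qkv[hidden : 2 * hidden], qkv[-hidden:]
assert q.shape == k.shape == v.shape == (hidden, hidden)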
30
from __future__ import annotations from collections.abc import MutableSequence class lowercase__: """simple docstring""" def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : MutableSequence[float] ) -> None: if len(SCREAMING_SNAKE_CASE_ ) != degree + 1: raise ValueError( '''The number of coefficients should be equal to the degree + 1.''' ) lowercase_ = list(SCREAMING_SNAKE_CASE_ ) lowercase_ = degree def __add__( self : Any , SCREAMING_SNAKE_CASE_ : Polynomial ) -> Polynomial: if self.degree > polynomial_a.degree: lowercase_ = self.coefficients[:] for i in range(polynomial_a.degree + 1 ): coefficients[i] += polynomial_a.coefficients[i] return Polynomial(self.degree , SCREAMING_SNAKE_CASE_ ) else: lowercase_ = polynomial_a.coefficients[:] for i in range(self.degree + 1 ): coefficients[i] += self.coefficients[i] return Polynomial(polynomial_a.degree , SCREAMING_SNAKE_CASE_ ) def __sub__( self : str , SCREAMING_SNAKE_CASE_ : Polynomial ) -> Polynomial: return self + polynomial_a * Polynomial(0 , [-1] ) def __neg__( self : int ) -> Polynomial: return Polynomial(self.degree , [-c for c in self.coefficients] ) def __mul__( self : Any , SCREAMING_SNAKE_CASE_ : Polynomial ) -> Polynomial: lowercase_ = [0] * (self.degree + polynomial_a.degree + 1) for i in range(self.degree + 1 ): for j in range(polynomial_a.degree + 1 ): coefficients[i + j] += ( self.coefficients[i] * polynomial_a.coefficients[j] ) return Polynomial(self.degree + polynomial_a.degree , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : int | float ) -> int | float: lowercase_ = 0 for i in range(self.degree + 1 ): result += self.coefficients[i] * (substitution**i) return result def __str__( self : Tuple ) -> str: lowercase_ = '''''' for i in range(self.degree , -1 , -1 ): if self.coefficients[i] == 0: continue elif self.coefficients[i] > 0: if polynomial: polynomial += " + " else: polynomial += " - " if i == 0: polynomial += str(abs(self.coefficients[i] ) ) elif i == 1: polynomial += str(abs(self.coefficients[i] ) ) + "x" else: polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(SCREAMING_SNAKE_CASE_ ) return polynomial def __repr__( self : Optional[Any] ) -> str: return self.__str__() def _lowercase ( self : int ) -> Polynomial: lowercase_ = [0] * self.degree for i in range(self.degree ): lowercase_ = self.coefficients[i + 1] * (i + 1) return Polynomial(self.degree - 1 , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : int | float = 0 ) -> Polynomial: lowercase_ = [0] * (self.degree + 2) lowercase_ = constant for i in range(self.degree + 1 ): lowercase_ = self.coefficients[i] / (i + 1) return Polynomial(self.degree + 1 , SCREAMING_SNAKE_CASE_ ) def __eq__( self : str , SCREAMING_SNAKE_CASE_ : object ) -> bool: if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): return False if self.degree != polynomial_a.degree: return False for i in range(self.degree + 1 ): if self.coefficients[i] != polynomial_a.coefficients[i]: return False return True def __ne__( self : List[str] , SCREAMING_SNAKE_CASE_ : object ) -> bool: return not self.__eq__(SCREAMING_SNAKE_CASE_ )
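The evaluation loop in the class above sums coefficient_i * x**i with coefficients stored in ascending degree; the same result can be sketched with Horner's rule (standalone helper, not part of the class):

def horner(coefficients, x):
    # coefficients[i] multiplies x**i, matching the layout used by the class above
    result = 0.0
    for c in reversed(coefficients):
        result = result * x + c
    return result

assert horner([1, 2, 3], 2) == 1 + 2 * 2 + 3 * 4   # 17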
30
1
def a ( snake_case__: int = 10 , snake_case__: int = 22 ): '''simple docstring''' lowercase_ = range(1 , snake_case__ ) lowercase_ = range(1 , snake_case__ ) return sum( 1 for power in powers for base in bases if len(str(base**power ) ) == power ) if __name__ == "__main__": print(f"{solution(1_0, 2_2) = }")
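A worked instance of what the solver above counts, assuming its two arguments bound the bases and the powers: 7**6 = 117649 is a six-digit sixth power, so it contributes one hit.

assert len(str(7 ** 6)) == 6   # 117649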
30
import itertools import math def a ( snake_case__: int ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(snake_case__ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def a ( ): '''simple docstring''' lowercase_ = 2 while True: if is_prime(snake_case__ ): yield num num += 1 def a ( snake_case__: int = 10_001 ): '''simple docstring''' return next(itertools.islice(prime_generator() , nth - 1 , snake_case__ ) ) if __name__ == "__main__": print(f"{solution() = }")
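A quick cross-check of the 6k +/- 1 trial division above against naive division over a small range (self-contained, independent of the helpers above):

assert [n for n in range(2, 20) if all(n % d for d in range(2, n))] == [2, 3, 5, 7, 11, 13, 17, 19]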
30
1
from sympy import diff, lambdify, symbols from sympy.functions import * # noqa: F403 def a ( snake_case__: str , snake_case__: complex , snake_case__: str = "x" , snake_case__: float = 10**-10 , snake_case__: int = 1 , ): '''simple docstring''' lowercase_ = symbols(snake_case__ ) lowercase_ = lambdify(snake_case__ , snake_case__ ) lowercase_ = lambdify(snake_case__ , diff(snake_case__ , snake_case__ ) ) lowercase_ = starting_point while True: if diff_function(snake_case__ ) != 0: lowercase_ = prev_guess - multiplicity * func(snake_case__ ) / diff_function( snake_case__ ) else: raise ZeroDivisionError('''Could not find root''' ) from None # Precision is checked by comparing the difference of consecutive guesses if abs(next_guess - prev_guess ) < precision: return next_guess lowercase_ = next_guess # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}") # Find root of polynomial # Find fourth Root of 5 print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}") # Find value of e print( 'The root of log(y) - 1 = 0 is ', f"{newton_raphson('log(y) - 1', 2, variable='y')}", ) # Exponential Roots print( 'The root of exp(x) - 1 = 0 is', f"{newton_raphson('exp(x) - 1', 1_0, precision=0.005)}", ) # Find root of cos(x) print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
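One hand-worked Newton step for f(x) = x**2 - 4 starting from x0 = 3, matching the update prev - multiplicity * f(prev) / f'(prev) used above with multiplicity 1:

x0 = 3.0
x1 = x0 - (x0 ** 2 - 4) / (2 * x0)   # 3 - 5/6 = 2.1666..., already close to the root 2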
30
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
30
1
import argparse import math import os import torch from neural_compressor.utils.pytorch import load from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel def a ( ): '''simple docstring''' lowercase_ = argparse.ArgumentParser() parser.add_argument( '''-m''' , '''--pretrained_model_name_or_path''' , type=snake_case__ , default=snake_case__ , required=snake_case__ , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , ) parser.add_argument( '''-c''' , '''--caption''' , type=snake_case__ , default='''robotic cat with wings''' , help='''Text used to generate images.''' , ) parser.add_argument( '''-n''' , '''--images_num''' , type=snake_case__ , default=4 , help='''How much images to generate.''' , ) parser.add_argument( '''-s''' , '''--seed''' , type=snake_case__ , default=42 , help='''Seed for random process.''' , ) parser.add_argument( '''-ci''' , '''--cuda_id''' , type=snake_case__ , default=0 , help='''cuda_id.''' , ) lowercase_ = parser.parse_args() return args def a ( snake_case__: Optional[Any] , snake_case__: Tuple , snake_case__: Union[str, Any] ): '''simple docstring''' if not len(snake_case__ ) == rows * cols: raise ValueError('''The specified number of rows and columns are not correct.''' ) lowercase_ , lowercase_ = imgs[0].size lowercase_ = Image.new('''RGB''' , size=(cols * w, rows * h) ) lowercase_ , lowercase_ = grid.size for i, img in enumerate(snake_case__ ): grid.paste(snake_case__ , box=(i % cols * w, i // cols * h) ) return grid def a ( snake_case__: Tuple , snake_case__: Union[str, Any]="robotic cat with wings" , snake_case__: Union[str, Any]=7.5 , snake_case__: List[str]=50 , snake_case__: List[Any]=1 , snake_case__: Optional[int]=42 , ): '''simple docstring''' lowercase_ = torch.Generator(pipeline.device ).manual_seed(snake_case__ ) lowercase_ = pipeline( snake_case__ , guidance_scale=snake_case__ , num_inference_steps=snake_case__ , generator=snake_case__ , num_images_per_prompt=snake_case__ , ).images lowercase_ = int(math.sqrt(snake_case__ ) ) lowercase_ = image_grid(snake_case__ , rows=_rows , cols=num_images_per_prompt // _rows ) return grid, images __a = parse_args() # Load models and create wrapper for stable diffusion __a = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer') __a = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder') __a = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae') __a = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet') __a = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer ) __a = lambda images, clip_input: (images, False) if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')): __a = load(args.pretrained_model_name_or_path, model=unet) unet.eval() setattr(pipeline, 'unet', unet) else: __a = unet.to(torch.device('cuda', args.cuda_id)) __a = pipeline.to(unet.device) __a , __a = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed) grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split())))) __a = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split())) os.makedirs(dirname, exist_ok=True) for idx, image in 
enumerate(images): image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
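The paste boxes computed in the grid helper above walk the images row-major; for a 2x2 grid of 64x64 images the corners land as follows (toy numbers only):

w = h = 64
cols = 2
boxes = [(i % cols * w, i // cols * h) for i in range(4)]
assert boxes == [(0, 0), (64, 0), (0, 64), (64, 64)]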
30
from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS __a = logging.get_logger(__name__) __a = { 'linear': get_linear_schedule_with_warmup, 'cosine': get_cosine_schedule_with_warmup, 'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup, 'polynomial': get_polynomial_decay_schedule_with_warmup, 'constant': get_constant_schedule, 'constant_w_warmup': get_constant_schedule_with_warmup, } class lowercase__( UpperCAmelCase ): """simple docstring""" def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> List[str]: super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) if config is None: assert isinstance(self.model , SCREAMING_SNAKE_CASE_ ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" f''' {self.model.__class__}''' ) lowercase_ = self.model.config else: lowercase_ = config lowercase_ = data_args lowercase_ = self.config.tgt_vocab_size if isinstance(self.config , SCREAMING_SNAKE_CASE_ ) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( f'''The `config.pad_token_id` is `None`. 
Using `config.eos_token_id` = {self.config.eos_token_id} for''' ''' padding..''' ) if self.args.label_smoothing == 0: lowercase_ = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss lowercase_ = label_smoothed_nll_loss def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : int ) -> Optional[Any]: if self.optimizer is None: lowercase_ = ['''bias''', '''LayerNorm.weight'''] lowercase_ = [ { '''params''': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], '''weight_decay''': self.args.weight_decay, }, { '''params''': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0, }, ] lowercase_ = Adafactor if self.args.adafactor else AdamW if self.args.adafactor: lowercase_ = Adafactor lowercase_ = {'''scale_parameter''': False, '''relative_step''': False} else: lowercase_ = AdamW lowercase_ = { '''betas''': (self.args.adam_betaa, self.args.adam_betaa), '''eps''': self.args.adam_epsilon, } lowercase_ = self.args.learning_rate if self.sharded_ddp: lowercase_ = OSS( params=SCREAMING_SNAKE_CASE_ , optim=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) else: lowercase_ = optimizer_cls(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) if self.lr_scheduler is None: lowercase_ = self._get_lr_scheduler(SCREAMING_SNAKE_CASE_ ) else: # ignoring --lr_scheduler logger.warning('''scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.''' ) def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Dict ) -> Dict: lowercase_ = arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": lowercase_ = schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": lowercase_ = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps ) else: lowercase_ = schedule_func( self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=SCREAMING_SNAKE_CASE_ ) return scheduler def _lowercase ( self : Tuple ) -> Optional[torch.utils.data.Sampler]: if isinstance(self.train_dataset , torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[Any] ) -> Any: if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token lowercase_ = model(**SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )[0] lowercase_ = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) ) else: # compute usual loss via models lowercase_ , lowercase_ = model(**SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )[:2] else: # compute label smoothed loss lowercase_ = model(**SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )[0] lowercase_ = torch.nn.functional.log_softmax(SCREAMING_SNAKE_CASE_ , dim=-1 ) lowercase_ , lowercase_ = self.loss_fn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.args.label_smoothing , 
ignore_index=self.config.pad_token_id ) return loss, logits def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> List[Any]: lowercase_ = inputs.pop('''labels''' ) lowercase_ , lowercase_ = self._compute_loss(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return loss def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : nn.Module , SCREAMING_SNAKE_CASE_ : Dict[str, Union[torch.Tensor, Any]] , SCREAMING_SNAKE_CASE_ : bool , SCREAMING_SNAKE_CASE_ : Optional[List[str]] = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: lowercase_ = self._prepare_inputs(SCREAMING_SNAKE_CASE_ ) lowercase_ = { '''max_length''': self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, '''num_beams''': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: lowercase_ = self.model.generate( inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , **SCREAMING_SNAKE_CASE_ , ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: lowercase_ = self._pad_tensors_to_max_len(SCREAMING_SNAKE_CASE_ , gen_kwargs['''max_length'''] ) lowercase_ = inputs.pop('''labels''' ) with torch.no_grad(): # compute loss on predict data lowercase_ , lowercase_ = self._compute_loss(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowercase_ = loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) lowercase_ = generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: lowercase_ = self._pad_tensors_to_max_len(SCREAMING_SNAKE_CASE_ , gen_kwargs['''max_length'''] ) return (loss, logits, labels) def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int ) -> Tuple: # If PAD token is not defined at least EOS token has to be defined lowercase_ = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( '''Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be''' f''' padded to `max_length`={max_length}''' ) lowercase_ = pad_token_id * torch.ones( (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device ) lowercase_ = tensor return padded_tensor
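The padding helper at the end of the trainer above right-pads generated ids up to max_length with the pad id; an equivalent standalone sketch, with a toy tensor and the pad id assumed to be 0:

import torch
t = torch.tensor([[5, 6, 7]])
padded = torch.full((1, 5), 0, dtype=t.dtype)   # (batch, max_length) filled with the pad id
padded[:, : t.shape[-1]] = t
assert padded.tolist() == [[5, 6, 7, 0, 0]]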
30
1
import logging import math from functools import partial from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union import torch from .tensor_utils import tensor_tree_map, tree_map def a ( snake_case__: Union[dict, list, tuple, torch.Tensor] ): '''simple docstring''' lowercase_ = [] if isinstance(snake_case__ , snake_case__ ): for v in tree.values(): shapes.extend(_fetch_dims(snake_case__ ) ) elif isinstance(snake_case__ , (list, tuple) ): for t in tree: shapes.extend(_fetch_dims(snake_case__ ) ) elif isinstance(snake_case__ , torch.Tensor ): shapes.append(tree.shape ) else: raise ValueError('''Not supported''' ) return shapes @torch.jit.ignore def a ( snake_case__: int , snake_case__: Tuple[int, ...] ): '''simple docstring''' lowercase_ = [] for d in reversed(snake_case__ ): idx.append(flat_idx % d ) lowercase_ = flat_idx // d return tuple(reversed(snake_case__ ) ) @torch.jit.ignore def a ( snake_case__: Sequence[int] , snake_case__: Sequence[int] , snake_case__: Sequence[int] , snake_case__: Optional[Sequence[bool]] = None , snake_case__: Optional[Sequence[bool]] = None , ): '''simple docstring''' # start_edges and end_edges both indicate whether, starting from any given # dimension, the start/end index is at the top/bottom edge of the # corresponding tensor, modeled as a tree def reduce_edge_list(snake_case__: List[bool] ) -> None: lowercase_ = True for i in range(len(snake_case__ ) ): lowercase_ = -1 * (i + 1) l[reversed_idx] &= tally lowercase_ = l[reversed_idx] if start_edges is None: lowercase_ = [s == 0 for s in start] reduce_edge_list(snake_case__ ) if end_edges is None: lowercase_ = [e == (d - 1) for e, d in zip(snake_case__ , snake_case__ )] reduce_edge_list(snake_case__ ) # Base cases. Either start/end are empty and we're done, or the final, # one-dimensional tensor can be simply sliced if len(snake_case__ ) == 0: return [()] elif len(snake_case__ ) == 1: return [(slice(start[0] , end[0] + 1 ),)] lowercase_ = [] lowercase_ = [] # Dimensions common to start and end can be selected directly for s, e in zip(snake_case__ , snake_case__ ): if s == e: path_list.append(slice(snake_case__ , s + 1 ) ) else: break lowercase_ = tuple(snake_case__ ) lowercase_ = len(snake_case__ ) # start == end, and we're done if divergence_idx == len(snake_case__ ): return [path] def upper() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None lowercase_ = start[divergence_idx] return tuple( path + (slice(snake_case__ , sdi + 1 ),) + s for s in _get_minimal_slice_set( start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) ) def lower() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None lowercase_ = end[divergence_idx] return tuple( path + (slice(snake_case__ , edi + 1 ),) + s for s in _get_minimal_slice_set( [0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) ) # If both start and end are at the edges of the subtree rooted at # divergence_idx, we can just select the whole subtree at once if start_edges[divergence_idx] and end_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) ) # If just start is at the edge, we can grab almost 
all of the subtree, # treating only the ragged bottom edge as an edge case elif start_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) ) slices.extend(lower() ) # Analogous to the previous case, but the top is ragged this time elif end_edges[divergence_idx]: slices.extend(upper() ) slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) ) # If both sides of the range are ragged, we need to handle both sides # separately. If there's contiguous meat in between them, we can index it # in one big chunk else: slices.extend(upper() ) lowercase_ = end[divergence_idx] - start[divergence_idx] if middle_ground > 1: slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) ) slices.extend(lower() ) return slices @torch.jit.ignore def a ( snake_case__: torch.Tensor , snake_case__: int , snake_case__: int , snake_case__: int ): '''simple docstring''' lowercase_ = t.shape[:no_batch_dims] lowercase_ = list(_flat_idx_to_idx(snake_case__ , snake_case__ ) ) # _get_minimal_slice_set is inclusive lowercase_ = list(_flat_idx_to_idx(flat_end - 1 , snake_case__ ) ) # Get an ordered list of slices to perform lowercase_ = _get_minimal_slice_set( snake_case__ , snake_case__ , snake_case__ , ) lowercase_ = [t[s] for s in slices] return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] ) def a ( snake_case__: Callable , snake_case__: Dict[str, Any] , snake_case__: int , snake_case__: int , snake_case__: bool = False , snake_case__: Any = None , snake_case__: bool = False , ): '''simple docstring''' if not (len(snake_case__ ) > 0): raise ValueError('''Must provide at least one input''' ) lowercase_ = [shape[:no_batch_dims] for shape in _fetch_dims(snake_case__ )] lowercase_ = tuple([max(snake_case__ ) for s in zip(*snake_case__ )] ) def _prep_inputs(snake_case__: torch.Tensor ) -> torch.Tensor: if not low_mem: if not sum(t.shape[:no_batch_dims] ) == no_batch_dims: lowercase_ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) lowercase_ = t.reshape(-1 , *t.shape[no_batch_dims:] ) else: lowercase_ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) return t lowercase_ = tensor_tree_map(_prep_inputs , snake_case__ ) lowercase_ = None if _out is not None: lowercase_ = tensor_tree_map(lambda snake_case__ : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out ) lowercase_ = 1 for d in orig_batch_dims: flat_batch_dim *= d lowercase_ = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0) def _select_chunk(snake_case__: torch.Tensor ) -> torch.Tensor: return t[i : i + chunk_size] if t.shape[0] != 1 else t lowercase_ = 0 lowercase_ = prepped_outputs for _ in range(snake_case__ ): # Chunk the input if not low_mem: lowercase_ = _select_chunk else: lowercase_ = partial( _chunk_slice , flat_start=snake_case__ , flat_end=min(snake_case__ , i + chunk_size ) , no_batch_dims=len(snake_case__ ) , ) lowercase_ = tensor_tree_map(snake_case__ , snake_case__ ) # Run the layer on the chunk lowercase_ = layer(**snake_case__ ) # Allocate space for the output if out is None: lowercase_ = tensor_tree_map(lambda snake_case__ : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , snake_case__ ) # Put the chunk in its pre-allocated space if isinstance(snake_case__ , snake_case__ ): def assign(snake_case__: dict , snake_case__: dict ) -> None: for k, v in da.items(): if isinstance(snake_case__ , snake_case__ ): assign(snake_case__ , da[k] ) else: if _add_into_out: v[i : i + chunk_size] += da[k] else: 
lowercase_ = da[k] assign(snake_case__ , snake_case__ ) elif isinstance(snake_case__ , snake_case__ ): for xa, xa in zip(snake_case__ , snake_case__ ): if _add_into_out: xa[i : i + chunk_size] += xa else: lowercase_ = xa elif isinstance(snake_case__ , torch.Tensor ): if _add_into_out: out[i : i + chunk_size] += output_chunk else: lowercase_ = output_chunk else: raise ValueError('''Not supported''' ) i += chunk_size lowercase_ = tensor_tree_map(lambda snake_case__ : t.view(orig_batch_dims + t.shape[1:] ) , snake_case__ ) return out class lowercase__: """simple docstring""" def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int = 5_1_2 , ) -> Tuple: lowercase_ = max_chunk_size lowercase_ = None lowercase_ = None def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : Callable , SCREAMING_SNAKE_CASE_ : tuple , SCREAMING_SNAKE_CASE_ : int ) -> int: logging.info('''Tuning chunk size...''' ) if min_chunk_size >= self.max_chunk_size: return min_chunk_size lowercase_ = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )] lowercase_ = [c for c in candidates if c > min_chunk_size] lowercase_ = [min_chunk_size] + candidates candidates[-1] += 4 def test_chunk_size(SCREAMING_SNAKE_CASE_ : int ) -> bool: try: with torch.no_grad(): fn(*SCREAMING_SNAKE_CASE_ , chunk_size=SCREAMING_SNAKE_CASE_ ) return True except RuntimeError: return False lowercase_ = 0 lowercase_ = len(SCREAMING_SNAKE_CASE_ ) - 1 while i > min_viable_chunk_size_index: lowercase_ = test_chunk_size(candidates[i] ) if not viable: lowercase_ = (min_viable_chunk_size_index + i) // 2 else: lowercase_ = i lowercase_ = (i + len(SCREAMING_SNAKE_CASE_ ) - 1) // 2 return candidates[min_viable_chunk_size_index] def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : Iterable , SCREAMING_SNAKE_CASE_ : Iterable ) -> bool: lowercase_ = True for aa, aa in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): assert type(SCREAMING_SNAKE_CASE_ ) == type(SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ): consistent &= self._compare_arg_caches(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase_ = [v for _, v in sorted(aa.items() , key=lambda SCREAMING_SNAKE_CASE_ : x[0] )] lowercase_ = [v for _, v in sorted(aa.items() , key=lambda SCREAMING_SNAKE_CASE_ : x[0] )] consistent &= self._compare_arg_caches(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else: consistent &= aa == aa return consistent def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : Callable , SCREAMING_SNAKE_CASE_ : tuple , SCREAMING_SNAKE_CASE_ : int , ) -> int: lowercase_ = True lowercase_ = tree_map(lambda SCREAMING_SNAKE_CASE_ : a.shape if isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ) else a , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if self.cached_arg_data is not None: # If args have changed shape/value, we need to re-tune assert len(self.cached_arg_data ) == len(SCREAMING_SNAKE_CASE_ ) lowercase_ = self._compare_arg_caches(self.cached_arg_data , SCREAMING_SNAKE_CASE_ ) else: # Otherwise, we can reuse the precomputed value lowercase_ = False if not consistent: lowercase_ = self._determine_favorable_chunk_size( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) lowercase_ = arg_data assert self.cached_chunk_size is not None return self.cached_chunk_size
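The number of passes the chunking loop above makes is just a ceiling division of the flattened batch over the chunk size, e.g.:

flat_batch_dim, chunk_size = 10, 4
assert flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0) == 3   # ceil(10 / 4)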
30
import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class lowercase__( unittest.TestCase ): """simple docstring""" def _lowercase ( self : List[str] ) -> List[Any]: lowercase_ = 1_0 def _lowercase ( self : int ) -> List[str]: lowercase_ = [1, 2, 3, 4] lowercase_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(SCREAMING_SNAKE_CASE_ , self.block_size , 0 ) , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : int ) -> Optional[Any]: lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] self.assertEqual(truncate_or_pad(SCREAMING_SNAKE_CASE_ , self.block_size , 0 ) , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Union[str, Any] ) -> Optional[int]: lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0, 1_1, 1_2, 1_3] lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] self.assertEqual(truncate_or_pad(SCREAMING_SNAKE_CASE_ , self.block_size , 0 ) , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Any ) -> List[Any]: lowercase_ = '''It was the year of Our Lord one thousand seven hundred and seventy-five.\n\nSpiritual revelations were conceded to England at that favoured period, as at this.''' lowercase_ , lowercase_ = process_story(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , [] ) def _lowercase ( self : List[str] ) -> List[str]: lowercase_ = '''''' lowercase_ , lowercase_ = process_story(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , [] ) self.assertEqual(SCREAMING_SNAKE_CASE_ , [] ) def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]: lowercase_ = ( '''It was the year of Our Lord one thousand seven hundred and ''' '''seventy-five\n\nSpiritual revelations were conceded to England ''' '''at that favoured period, as at this.\n@highlight\n\nIt was the best of times''' ) lowercase_ , lowercase_ = process_story(SCREAMING_SNAKE_CASE_ ) lowercase_ = [ '''It was the year of Our Lord one thousand seven hundred and seventy-five.''', '''Spiritual revelations were conceded to England at that favoured period, as at this.''', ] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowercase_ = ['''It was the best of times.'''] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Union[str, Any] ) -> Optional[Any]: lowercase_ = torch.tensor([1, 2, 3, 4] ) lowercase_ = torch.tensor([1, 1, 1, 1] ) np.testing.assert_array_equal(build_mask(SCREAMING_SNAKE_CASE_ , 0 ).numpy() , expected.numpy() ) def _lowercase ( self : List[Any] ) -> Tuple: lowercase_ = torch.tensor([1, 2, 3, 4, 2_3, 2_3, 2_3] ) lowercase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(SCREAMING_SNAKE_CASE_ , 2_3 ).numpy() , expected.numpy() ) def _lowercase ( self : int ) -> Dict: lowercase_ = torch.tensor([8, 2, 3, 4, 1, 1, 1] ) lowercase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(SCREAMING_SNAKE_CASE_ , 1 ).numpy() , expected.numpy() ) def _lowercase ( self : List[str] ) -> Tuple: lowercase_ = 1_0_1 lowercase_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_0_1, 5, 6], [1, 1_0_1, 3, 4, 1_0_1, 6]] ) lowercase_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] ) lowercase_ = compute_token_type_ids(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) np.testing.assert_array_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
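The truncate_or_pad behaviour exercised by the first tests above can be mirrored by a tiny pure-Python reference (illustrative helper, not the tested implementation):

def truncate_or_pad_ref(seq, block_size, pad_value):
    # pad with pad_value up to block_size, then cut anything longer
    return (list(seq) + [pad_value] * block_size)[:block_size]

assert truncate_or_pad_ref([1, 2, 3, 4], 10, 0) == [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]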
30
1
from __future__ import annotations from fractions import Fraction from math import gcd, sqrt def a ( snake_case__: int ): '''simple docstring''' lowercase_ = int(number**0.5 ) return number == sq * sq def a ( snake_case__: int , snake_case__: int , snake_case__: int , snake_case__: int , snake_case__: int , snake_case__: int ): '''simple docstring''' lowercase_ = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den lowercase_ = x_den * y_den * z_den lowercase_ = gcd(snake_case__ , snake_case__ ) top //= hcf bottom //= hcf return top, bottom def a ( snake_case__: int = 35 ): '''simple docstring''' lowercase_ = set() lowercase_ = 42 lowercase_ = Fraction(0 ) lowercase_ = 42 for x_num in range(1 , order + 1 ): for x_den in range(x_num + 1 , order + 1 ): for y_num in range(1 , order + 1 ): for y_den in range(y_num + 1 , order + 1 ): # n=1 lowercase_ = x_num * y_den + x_den * y_num lowercase_ = x_den * y_den lowercase_ = gcd(snake_case__ , snake_case__ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: lowercase_ = add_three( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) unique_s.add(snake_case__ ) # n=2 lowercase_ = ( x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num ) lowercase_ = x_den * x_den * y_den * y_den if is_sq(snake_case__ ) and is_sq(snake_case__ ): lowercase_ = int(sqrt(snake_case__ ) ) lowercase_ = int(sqrt(snake_case__ ) ) lowercase_ = gcd(snake_case__ , snake_case__ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: lowercase_ = add_three( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) unique_s.add(snake_case__ ) # n=-1 lowercase_ = x_num * y_num lowercase_ = x_den * y_num + x_num * y_den lowercase_ = gcd(snake_case__ , snake_case__ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: lowercase_ = add_three( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) unique_s.add(snake_case__ ) # n=2 lowercase_ = x_num * x_num * y_num * y_num lowercase_ = ( x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den ) if is_sq(snake_case__ ) and is_sq(snake_case__ ): lowercase_ = int(sqrt(snake_case__ ) ) lowercase_ = int(sqrt(snake_case__ ) ) lowercase_ = gcd(snake_case__ , snake_case__ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: lowercase_ = add_three( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) unique_s.add(snake_case__ ) for num, den in unique_s: total += Fraction(snake_case__ , snake_case__ ) return total.denominator + total.numerator if __name__ == "__main__": print(f"{solution() = }")
30
def a ( snake_case__: list ): '''simple docstring''' if len(snake_case__ ) <= 1: return [tuple(snake_case__ )] lowercase_ = [] def generate(snake_case__: int , snake_case__: list ): if k == 1: res.append(tuple(arr[:] ) ) return generate(k - 1 , snake_case__ ) for i in range(k - 1 ): if k % 2 == 0: # k is even lowercase_ , lowercase_ = arr[k - 1], arr[i] else: # k is odd lowercase_ , lowercase_ = arr[k - 1], arr[0] generate(k - 1 , snake_case__ ) generate(len(snake_case__ ) , snake_case__ ) return res if __name__ == "__main__": __a = input('Enter numbers separated by a comma:\n').strip() __a = [int(item) for item in user_input.split(',')] print(heaps(arr))
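Heap's algorithm above should emit every ordering exactly once, i.e. n! tuples for n distinct items; a quick independent check against itertools:

import itertools
assert len(list(itertools.permutations([1, 2, 3]))) == 6   # 3! orderings expected from the routine above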
30
1
def a ( snake_case__: int , snake_case__: int ): '''simple docstring''' return number | (1 << position) def a ( snake_case__: int , snake_case__: int ): '''simple docstring''' return number & ~(1 << position) def a ( snake_case__: int , snake_case__: int ): '''simple docstring''' return number ^ (1 << position) def a ( snake_case__: int , snake_case__: int ): '''simple docstring''' return ((number >> position) & 1) == 1 def a ( snake_case__: int , snake_case__: int ): '''simple docstring''' return int((number & (1 << position)) != 0 ) if __name__ == "__main__": import doctest doctest.testmod()
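A worked example of the bit helpers above: setting bit 2 of 0b1001 (9) gives 0b1101 (13), clearing it restores the original value, and flipping toggles between the two.

number, position = 0b1001, 2
assert number | (1 << position) == 0b1101        # set the bit
assert 0b1101 & ~(1 << position) == 0b1001       # clear the bit
assert number ^ (1 << position) == 0b1101        # flip the bit
assert ((0b1101 >> position) & 1) == 1           # test the bit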
30
import argparse import math import os import torch from neural_compressor.utils.pytorch import load from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel def a ( ): '''simple docstring''' lowercase_ = argparse.ArgumentParser() parser.add_argument( '''-m''' , '''--pretrained_model_name_or_path''' , type=snake_case__ , default=snake_case__ , required=snake_case__ , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , ) parser.add_argument( '''-c''' , '''--caption''' , type=snake_case__ , default='''robotic cat with wings''' , help='''Text used to generate images.''' , ) parser.add_argument( '''-n''' , '''--images_num''' , type=snake_case__ , default=4 , help='''How much images to generate.''' , ) parser.add_argument( '''-s''' , '''--seed''' , type=snake_case__ , default=42 , help='''Seed for random process.''' , ) parser.add_argument( '''-ci''' , '''--cuda_id''' , type=snake_case__ , default=0 , help='''cuda_id.''' , ) lowercase_ = parser.parse_args() return args def a ( snake_case__: Optional[Any] , snake_case__: Tuple , snake_case__: Union[str, Any] ): '''simple docstring''' if not len(snake_case__ ) == rows * cols: raise ValueError('''The specified number of rows and columns are not correct.''' ) lowercase_ , lowercase_ = imgs[0].size lowercase_ = Image.new('''RGB''' , size=(cols * w, rows * h) ) lowercase_ , lowercase_ = grid.size for i, img in enumerate(snake_case__ ): grid.paste(snake_case__ , box=(i % cols * w, i // cols * h) ) return grid def a ( snake_case__: Tuple , snake_case__: Union[str, Any]="robotic cat with wings" , snake_case__: Union[str, Any]=7.5 , snake_case__: List[str]=50 , snake_case__: List[Any]=1 , snake_case__: Optional[int]=42 , ): '''simple docstring''' lowercase_ = torch.Generator(pipeline.device ).manual_seed(snake_case__ ) lowercase_ = pipeline( snake_case__ , guidance_scale=snake_case__ , num_inference_steps=snake_case__ , generator=snake_case__ , num_images_per_prompt=snake_case__ , ).images lowercase_ = int(math.sqrt(snake_case__ ) ) lowercase_ = image_grid(snake_case__ , rows=_rows , cols=num_images_per_prompt // _rows ) return grid, images __a = parse_args() # Load models and create wrapper for stable diffusion __a = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer') __a = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder') __a = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae') __a = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet') __a = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer ) __a = lambda images, clip_input: (images, False) if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')): __a = load(args.pretrained_model_name_or_path, model=unet) unet.eval() setattr(pipeline, 'unet', unet) else: __a = unet.to(torch.device('cuda', args.cuda_id)) __a = pipeline.to(unet.device) __a , __a = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed) grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split())))) __a = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split())) os.makedirs(dirname, exist_ok=True) for idx, image in 
enumerate(images): image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
30
1
import unittest from diffusers.models.unet_ad_blocks import * # noqa F403 from diffusers.utils import torch_device from .test_unet_blocks_common import UNetBlockTesterMixin class lowercase__( UpperCAmelCase , unittest.TestCase ): """simple docstring""" a :Optional[Any] = DownBlockaD # noqa F405 a :Any = 'down' def _lowercase ( self : Dict ) -> str: lowercase_ = [-0.02_32, -0.98_69, 0.80_54, -0.06_37, -0.16_88, -1.42_64, 0.44_70, -1.33_94, 0.09_04] super().test_output(SCREAMING_SNAKE_CASE_ ) class lowercase__( UpperCAmelCase , unittest.TestCase ): """simple docstring""" a :int = ResnetDownsampleBlockaD # noqa F405 a :Dict = 'down' def _lowercase ( self : Dict ) -> int: lowercase_ = [0.07_10, 0.24_10, -0.73_20, -1.07_57, -1.13_43, 0.35_40, -0.01_33, -0.25_76, 0.09_48] super().test_output(SCREAMING_SNAKE_CASE_ ) class lowercase__( UpperCAmelCase , unittest.TestCase ): """simple docstring""" a :int = AttnDownBlockaD # noqa F405 a :Tuple = 'down' def _lowercase ( self : Optional[Any] ) -> Optional[Any]: lowercase_ = [0.06_36, 0.89_64, -0.62_34, -1.01_31, 0.08_44, 0.49_35, 0.34_37, 0.09_11, -0.29_57] super().test_output(SCREAMING_SNAKE_CASE_ ) class lowercase__( UpperCAmelCase , unittest.TestCase ): """simple docstring""" a :str = CrossAttnDownBlockaD # noqa F405 a :str = 'down' def _lowercase ( self : List[Any] ) -> Optional[Any]: lowercase_ , lowercase_ = super().prepare_init_args_and_inputs_for_common() lowercase_ = 3_2 return init_dict, inputs_dict def _lowercase ( self : List[Any] ) -> Dict: lowercase_ = [0.22_38, -0.73_96, -0.22_55, -0.38_29, 0.19_25, 1.16_65, 0.06_03, -0.72_95, 0.19_83] super().test_output(SCREAMING_SNAKE_CASE_ ) class lowercase__( UpperCAmelCase , unittest.TestCase ): """simple docstring""" a :List[str] = SimpleCrossAttnDownBlockaD # noqa F405 a :List[Any] = 'down' @property def _lowercase ( self : Tuple ) -> Dict: return super().get_dummy_input(include_encoder_hidden_states=SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Optional[Any] ) -> List[Any]: lowercase_ , lowercase_ = super().prepare_init_args_and_inputs_for_common() lowercase_ = 3_2 return init_dict, inputs_dict @unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' ) def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]: lowercase_ = [0.79_21, -0.09_92, -0.19_62, -0.76_95, -0.42_42, 0.78_04, 0.47_37, 0.27_65, 0.33_38] super().test_output(SCREAMING_SNAKE_CASE_ ) class lowercase__( UpperCAmelCase , unittest.TestCase ): """simple docstring""" a :Dict = SkipDownBlockaD # noqa F405 a :str = 'down' @property def _lowercase ( self : int ) -> Optional[int]: return super().get_dummy_input(include_skip_sample=SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Optional[int] ) -> List[str]: lowercase_ = [-0.08_45, -0.20_87, -0.24_65, 0.09_71, 0.19_00, -0.04_84, 0.26_64, 0.41_79, 0.50_69] super().test_output(SCREAMING_SNAKE_CASE_ ) class lowercase__( UpperCAmelCase , unittest.TestCase ): """simple docstring""" a :Optional[Any] = AttnSkipDownBlockaD # noqa F405 a :Optional[Any] = 'down' @property def _lowercase ( self : Optional[int] ) -> List[str]: return super().get_dummy_input(include_skip_sample=SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Any ) -> Dict: lowercase_ = [0.55_39, 0.16_09, 0.49_24, 0.05_37, -0.19_95, 0.40_50, 0.09_79, -0.27_21, -0.06_42] super().test_output(SCREAMING_SNAKE_CASE_ ) class lowercase__( UpperCAmelCase , unittest.TestCase ): """simple docstring""" a :Dict = DownEncoderBlockaD # noqa F405 a :Tuple = 'down' @property def _lowercase ( self : List[Any] ) -> 
Optional[int]: return super().get_dummy_input(include_temb=SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : str ) -> Dict: lowercase_ = { '''in_channels''': 3_2, '''out_channels''': 3_2, } lowercase_ = self.dummy_input return init_dict, inputs_dict def _lowercase ( self : List[Any] ) -> Tuple: lowercase_ = [1.11_02, 0.53_02, 0.48_72, -0.00_23, -0.80_42, 0.04_83, -0.34_89, -0.56_32, 0.76_26] super().test_output(SCREAMING_SNAKE_CASE_ ) class lowercase__( UpperCAmelCase , unittest.TestCase ): """simple docstring""" a :Optional[int] = AttnDownEncoderBlockaD # noqa F405 a :Optional[Any] = 'down' @property def _lowercase ( self : List[str] ) -> Dict: return super().get_dummy_input(include_temb=SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : List[str] ) -> List[Any]: lowercase_ = { '''in_channels''': 3_2, '''out_channels''': 3_2, } lowercase_ = self.dummy_input return init_dict, inputs_dict def _lowercase ( self : str ) -> Any: lowercase_ = [0.89_66, -0.14_86, 0.85_68, 0.81_41, -0.90_46, -0.13_42, -0.09_72, -0.74_17, 0.15_38] super().test_output(SCREAMING_SNAKE_CASE_ ) class lowercase__( UpperCAmelCase , unittest.TestCase ): """simple docstring""" a :Dict = UNetMidBlockaD # noqa F405 a :str = 'mid' def _lowercase ( self : Any ) -> int: lowercase_ = { '''in_channels''': 3_2, '''temb_channels''': 1_2_8, } lowercase_ = self.dummy_input return init_dict, inputs_dict def _lowercase ( self : Optional[Any] ) -> Any: lowercase_ = [-0.10_62, 1.72_48, 0.34_94, 1.45_69, -0.09_10, -1.24_21, -0.99_84, 0.67_36, 1.00_28] super().test_output(SCREAMING_SNAKE_CASE_ ) class lowercase__( UpperCAmelCase , unittest.TestCase ): """simple docstring""" a :List[Any] = UNetMidBlockaDCrossAttn # noqa F405 a :str = 'mid' def _lowercase ( self : List[Any] ) -> List[Any]: lowercase_ , lowercase_ = super().prepare_init_args_and_inputs_for_common() lowercase_ = 3_2 return init_dict, inputs_dict def _lowercase ( self : Any ) -> str: lowercase_ = [0.01_87, 2.42_20, 0.44_84, 1.12_03, -0.61_21, -1.51_22, -0.82_70, 0.78_51, 1.83_35] super().test_output(SCREAMING_SNAKE_CASE_ ) class lowercase__( UpperCAmelCase , unittest.TestCase ): """simple docstring""" a :str = UNetMidBlockaDSimpleCrossAttn # noqa F405 a :List[str] = 'mid' @property def _lowercase ( self : Any ) -> int: return super().get_dummy_input(include_encoder_hidden_states=SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Optional[Any] ) -> List[Any]: lowercase_ , lowercase_ = super().prepare_init_args_and_inputs_for_common() lowercase_ = 3_2 return init_dict, inputs_dict def _lowercase ( self : Tuple ) -> int: lowercase_ = [0.71_43, 1.99_74, 0.54_48, 1.39_77, 0.12_82, -1.12_37, -1.42_38, 0.55_30, 0.88_80] super().test_output(SCREAMING_SNAKE_CASE_ ) class lowercase__( UpperCAmelCase , unittest.TestCase ): """simple docstring""" a :str = UpBlockaD # noqa F405 a :Optional[int] = 'up' @property def _lowercase ( self : List[str] ) -> Dict: return super().get_dummy_input(include_res_hidden_states_tuple=SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Optional[Any] ) -> List[Any]: lowercase_ = [-0.20_41, -0.41_65, -0.30_22, 0.00_41, -0.66_28, -0.70_53, 0.19_28, -0.03_25, 0.05_23] super().test_output(SCREAMING_SNAKE_CASE_ ) class lowercase__( UpperCAmelCase , unittest.TestCase ): """simple docstring""" a :Optional[Any] = ResnetUpsampleBlockaD # noqa F405 a :Tuple = 'up' @property def _lowercase ( self : int ) -> Dict: return super().get_dummy_input(include_res_hidden_states_tuple=SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : int ) -> Optional[Any]: lowercase_ = [0.22_87, 0.35_49, 
-0.13_46, 0.47_97, -0.17_15, -0.96_49, 0.73_05, -0.58_64, -0.62_44] super().test_output(SCREAMING_SNAKE_CASE_ ) class lowercase__( UpperCAmelCase , unittest.TestCase ): """simple docstring""" a :Any = CrossAttnUpBlockaD # noqa F405 a :Optional[Any] = 'up' @property def _lowercase ( self : Optional[Any] ) -> Optional[int]: return super().get_dummy_input(include_res_hidden_states_tuple=SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : List[str] ) -> Optional[Any]: lowercase_ , lowercase_ = super().prepare_init_args_and_inputs_for_common() lowercase_ = 3_2 return init_dict, inputs_dict def _lowercase ( self : Optional[Any] ) -> Optional[int]: lowercase_ = [-0.14_03, -0.35_15, -0.04_20, -0.14_25, 0.31_67, 0.50_94, -0.21_81, 0.59_31, 0.55_82] super().test_output(SCREAMING_SNAKE_CASE_ ) class lowercase__( UpperCAmelCase , unittest.TestCase ): """simple docstring""" a :Union[str, Any] = SimpleCrossAttnUpBlockaD # noqa F405 a :List[str] = 'up' @property def _lowercase ( self : Tuple ) -> List[str]: return super().get_dummy_input(include_res_hidden_states_tuple=SCREAMING_SNAKE_CASE_ , include_encoder_hidden_states=SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : str ) -> List[str]: lowercase_ , lowercase_ = super().prepare_init_args_and_inputs_for_common() lowercase_ = 3_2 return init_dict, inputs_dict def _lowercase ( self : Dict ) -> Any: lowercase_ = [0.26_45, 0.14_80, 0.09_09, 0.80_44, -0.97_58, -0.90_83, 0.09_94, -1.14_53, -0.74_02] super().test_output(SCREAMING_SNAKE_CASE_ ) class lowercase__( UpperCAmelCase , unittest.TestCase ): """simple docstring""" a :Optional[int] = AttnUpBlockaD # noqa F405 a :Tuple = 'up' @property def _lowercase ( self : Any ) -> str: return super().get_dummy_input(include_res_hidden_states_tuple=SCREAMING_SNAKE_CASE_ ) @unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' ) def _lowercase ( self : Any ) -> Union[str, Any]: lowercase_ = [0.09_79, 0.13_26, 0.00_21, 0.06_59, 0.22_49, 0.00_59, 0.11_32, 0.59_52, 0.10_33] super().test_output(SCREAMING_SNAKE_CASE_ ) class lowercase__( UpperCAmelCase , unittest.TestCase ): """simple docstring""" a :Optional[int] = SkipUpBlockaD # noqa F405 a :Tuple = 'up' @property def _lowercase ( self : Tuple ) -> Any: return super().get_dummy_input(include_res_hidden_states_tuple=SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : str ) -> Optional[int]: lowercase_ = [-0.08_93, -0.12_34, -0.15_06, -0.03_32, 0.01_23, -0.02_11, 0.05_66, 0.01_43, 0.03_62] super().test_output(SCREAMING_SNAKE_CASE_ ) class lowercase__( UpperCAmelCase , unittest.TestCase ): """simple docstring""" a :Union[str, Any] = AttnSkipUpBlockaD # noqa F405 a :List[Any] = 'up' @property def _lowercase ( self : Optional[Any] ) -> Tuple: return super().get_dummy_input(include_res_hidden_states_tuple=SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Any ) -> List[str]: lowercase_ = [0.03_61, 0.06_17, 0.27_87, -0.03_50, 0.03_42, 0.34_21, -0.08_43, 0.09_13, 0.30_15] super().test_output(SCREAMING_SNAKE_CASE_ ) class lowercase__( UpperCAmelCase , unittest.TestCase ): """simple docstring""" a :Any = UpDecoderBlockaD # noqa F405 a :Optional[Any] = 'up' @property def _lowercase ( self : Dict ) -> Union[str, Any]: return super().get_dummy_input(include_temb=SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : str ) -> Tuple: lowercase_ = {'''in_channels''': 3_2, '''out_channels''': 3_2} lowercase_ = self.dummy_input return init_dict, inputs_dict def _lowercase ( self : int ) -> Tuple: lowercase_ = [0.44_04, 0.19_98, -0.98_86, -0.33_20, -0.31_28, -0.70_34, -0.69_55, 
-0.23_38, -0.31_37] super().test_output(SCREAMING_SNAKE_CASE_ ) class lowercase__( UpperCAmelCase , unittest.TestCase ): """simple docstring""" a :List[Any] = AttnUpDecoderBlockaD # noqa F405 a :List[str] = 'up' @property def _lowercase ( self : Union[str, Any] ) -> Optional[int]: return super().get_dummy_input(include_temb=SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Optional[int] ) -> str: lowercase_ = {'''in_channels''': 3_2, '''out_channels''': 3_2} lowercase_ = self.dummy_input return init_dict, inputs_dict def _lowercase ( self : Union[str, Any] ) -> Optional[Any]: lowercase_ = [0.67_38, 0.44_91, 0.10_55, 1.07_10, 0.73_16, 0.33_39, 0.33_52, 0.10_23, 0.35_68] super().test_output(SCREAMING_SNAKE_CASE_ )
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __a = { 'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig'] } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['RemBertTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['RemBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'RemBertForCausalLM', 'RemBertForMaskedLM', 'RemBertForMultipleChoice', 'RemBertForQuestionAnswering', 'RemBertForSequenceClassification', 'RemBertForTokenClassification', 'RemBertLayer', 'RemBertModel', 'RemBertPreTrainedModel', 'load_tf_weights_in_rembert', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFRemBertForCausalLM', 'TFRemBertForMaskedLM', 'TFRemBertForMultipleChoice', 'TFRemBertForQuestionAnswering', 'TFRemBertForSequenceClassification', 'TFRemBertForTokenClassification', 'TFRemBertLayer', 'TFRemBertModel', 'TFRemBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_rembert import RemBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_rembert_fast import RemBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rembert import ( REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, RemBertForCausalLM, RemBertForMaskedLM, RemBertForMultipleChoice, RemBertForQuestionAnswering, RemBertForSequenceClassification, RemBertForTokenClassification, RemBertLayer, RemBertModel, RemBertPreTrainedModel, load_tf_weights_in_rembert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rembert import ( TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFRemBertForCausalLM, TFRemBertForMaskedLM, TFRemBertForMultipleChoice, TFRemBertForQuestionAnswering, TFRemBertForSequenceClassification, TFRemBertForTokenClassification, TFRemBertLayer, TFRemBertModel, TFRemBertPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
def move_tower(height: int, from_pole: str, to_pole: str, with_pole: str) -> None:
    """Recursively move `height` disks from `from_pole` to `to_pole`, using `with_pole` as the spare peg."""
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(from_pole: str, to_pole: str) -> None:
    """Print a single disk move."""
    print("moving disk from", from_pole, "to", to_pole)


def main() -> None:
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")


if __name__ == "__main__":
    main()
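# --- Added usage sketch (an addition, not part of the original script) ---
# A tower of height n takes exactly 2**n - 1 single-disk moves; the check below
# verifies that for n = 3 by capturing what move_tower prints.
import io
from contextlib import redirect_stdout

_buf = io.StringIO()
with redirect_stdout(_buf):
    move_tower(3, "A", "B", "C")
assert len(_buf.getvalue().splitlines()) == 2**3 - 1  # 7 moves for height 3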
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __a = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['DeiTFeatureExtractor'] __a = ['DeiTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'DeiTForImageClassification', 'DeiTForImageClassificationWithTeacher', 'DeiTForMaskedImageModeling', 'DeiTModel', 'DeiTPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFDeiTForImageClassification', 'TFDeiTForImageClassificationWithTeacher', 'TFDeiTForMaskedImageModeling', 'TFDeiTModel', 'TFDeiTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_deit import DeiTFeatureExtractor from .image_processing_deit import DeiTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deit import ( DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, DeiTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deit import ( TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, TFDeiTPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
def prime_sieve_eratosthenes(num: int) -> list:
    """Return all primes up to and including `num` using the sieve of Eratosthenes."""
    if num <= 0:
        raise ValueError("Input must be a positive integer")
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            # mark every multiple of p starting from p*p as composite
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
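# --- Added usage sketch (an addition, not part of the original script) ---
# Quick sanity check for the sieve above: the primes up to 20 are well known.
assert prime_sieve_eratosthenes(20) == [2, 3, 5, 7, 11, 13, 17, 19]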
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) __a = { 'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'], 'processing_trocr': ['TrOCRProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST', 'TrOCRForCausalLM', 'TrOCRPreTrainedModel', ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { 'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json', 'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json', 'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json', 'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json', 'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json', 'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json', 'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json', 'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json', 'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json', } class lowercase__( UpperCAmelCase ): """simple docstring""" a :int = 'xmod' def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict=3_0_5_2_2 , SCREAMING_SNAKE_CASE_ : Dict=7_6_8 , SCREAMING_SNAKE_CASE_ : Optional[Any]=1_2 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1_2 , SCREAMING_SNAKE_CASE_ : Optional[int]=3_0_7_2 , SCREAMING_SNAKE_CASE_ : int="gelu" , SCREAMING_SNAKE_CASE_ : Any=0.1 , SCREAMING_SNAKE_CASE_ : Any=0.1 , SCREAMING_SNAKE_CASE_ : List[Any]=5_1_2 , SCREAMING_SNAKE_CASE_ : Any=2 , SCREAMING_SNAKE_CASE_ : Optional[int]=0.02 , SCREAMING_SNAKE_CASE_ : Dict=1e-12 , SCREAMING_SNAKE_CASE_ : Optional[Any]=1 , SCREAMING_SNAKE_CASE_ : List[str]=0 , SCREAMING_SNAKE_CASE_ : List[str]=2 , SCREAMING_SNAKE_CASE_ : List[Any]="absolute" , SCREAMING_SNAKE_CASE_ : List[str]=True , SCREAMING_SNAKE_CASE_ : Dict=None , SCREAMING_SNAKE_CASE_ : Optional[Any]=False , SCREAMING_SNAKE_CASE_ : Tuple=2 , SCREAMING_SNAKE_CASE_ : Any=False , SCREAMING_SNAKE_CASE_ : Optional[Any]=True , SCREAMING_SNAKE_CASE_ : Optional[Any]=True , SCREAMING_SNAKE_CASE_ : Union[str, Any]=("en_XX",) , SCREAMING_SNAKE_CASE_ : List[str]=None , **SCREAMING_SNAKE_CASE_ : List[Any] , ) -> List[Any]: super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) lowercase_ = vocab_size lowercase_ = hidden_size lowercase_ = num_hidden_layers lowercase_ = num_attention_heads lowercase_ = hidden_act lowercase_ = intermediate_size lowercase_ = hidden_dropout_prob lowercase_ = attention_probs_dropout_prob lowercase_ = max_position_embeddings lowercase_ = type_vocab_size lowercase_ = initializer_range lowercase_ = layer_norm_eps lowercase_ = position_embedding_type lowercase_ = use_cache lowercase_ = classifier_dropout lowercase_ = pre_norm lowercase_ = adapter_reduction_factor lowercase_ = adapter_layer_norm lowercase_ = adapter_reuse_layer_norm lowercase_ = ln_before_adapter lowercase_ = list(SCREAMING_SNAKE_CASE_ ) lowercase_ = default_language class lowercase__( UpperCAmelCase ): """simple docstring""" @property def _lowercase ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": lowercase_ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: lowercase_ = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', 
dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
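# --- Added usage sketch (an assumption, not part of the original module) ---
# The configuration class above is assumed to be exposed by the library as
# `XmodConfig` (its model_type is "xmod"); the keyword names below mirror the
# attributes assigned in its __init__, and the values are illustrative only.
from transformers import XmodConfig

config = XmodConfig(
    vocab_size=30522,
    num_hidden_layers=12,
    pre_norm=False,
    adapter_reduction_factor=2,
    default_language="en_XX",
)
print(config.model_type)  # "xmod"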
from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig __a = logging.get_logger(__name__) # General docstring __a = 'RegNetConfig' # Base docstring __a = 'facebook/regnet-y-040' __a = [1, 1_0_8_8, 7, 7] # Image classification docstring __a = 'facebook/regnet-y-040' __a = 'tabby, tabby cat' __a = [ 'facebook/regnet-y-040', # See all regnet models at https://huggingface.co/models?filter=regnet ] class lowercase__( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 3 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : Optional[str] = "relu" , **SCREAMING_SNAKE_CASE_ : Any , ) -> List[str]: super().__init__(**SCREAMING_SNAKE_CASE_ ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb lowercase_ = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) lowercase_ = tf.keras.layers.ConvaD( filters=SCREAMING_SNAKE_CASE_ , kernel_size=SCREAMING_SNAKE_CASE_ , strides=SCREAMING_SNAKE_CASE_ , padding='''VALID''' , groups=SCREAMING_SNAKE_CASE_ , use_bias=SCREAMING_SNAKE_CASE_ , name='''convolution''' , ) lowercase_ = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' ) lowercase_ = ACTaFN[activation] if activation is not None else tf.identity def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any: lowercase_ = self.convolution(self.padding(SCREAMING_SNAKE_CASE_ ) ) lowercase_ = self.normalization(SCREAMING_SNAKE_CASE_ ) lowercase_ = self.activation(SCREAMING_SNAKE_CASE_ ) return hidden_state class lowercase__( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : str , SCREAMING_SNAKE_CASE_ : RegNetConfig , **SCREAMING_SNAKE_CASE_ : str ) -> Any: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = config.num_channels lowercase_ = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , ) def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : str ) -> Optional[int]: lowercase_ = shape_list(SCREAMING_SNAKE_CASE_ )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) lowercase_ = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 2, 3, 1) ) lowercase_ = self.embedder(SCREAMING_SNAKE_CASE_ ) return hidden_state class lowercase__( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 2 , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Union[str, Any]: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = tf.keras.layers.ConvaD( filters=SCREAMING_SNAKE_CASE_ , kernel_size=1 , strides=SCREAMING_SNAKE_CASE_ , use_bias=SCREAMING_SNAKE_CASE_ , name='''convolution''' ) lowercase_ = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' ) def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : bool = False ) -> tf.Tensor: return self.normalization(self.convolution(SCREAMING_SNAKE_CASE_ ) , training=SCREAMING_SNAKE_CASE_ ) class lowercase__( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : int ) -> Optional[Any]: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=SCREAMING_SNAKE_CASE_ , name='''pooler''' ) lowercase_ = [ tf.keras.layers.ConvaD(filters=SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation='''relu''' , name='''attention.0''' ), tf.keras.layers.ConvaD(filters=SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ), ] def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Dict: # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels] lowercase_ = self.pooler(SCREAMING_SNAKE_CASE_ ) for layer_module in self.attention: lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ ) lowercase_ = hidden_state * pooled return hidden_state class lowercase__( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : str , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 1 , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> List[str]: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = in_channels != out_channels or stride != 1 lowercase_ = max(1 , out_channels // config.groups_width ) lowercase_ = ( TFRegNetShortCut(SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name='''shortcut''' ) if should_apply_shortcut else tf.keras.layers.Activation('''linear''' , name='''shortcut''' ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
lowercase_ = [ TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ), TFRegNetConvLayer( SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , groups=SCREAMING_SNAKE_CASE_ , activation=config.hidden_act , name='''layer.1''' ), TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE_ , name='''layer.2''' ), ] lowercase_ = ACTaFN[config.hidden_act] def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any: lowercase_ = hidden_state for layer_module in self.layers: lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ ) lowercase_ = self.shortcut(SCREAMING_SNAKE_CASE_ ) hidden_state += residual lowercase_ = self.activation(SCREAMING_SNAKE_CASE_ ) return hidden_state class lowercase__( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : Any , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 1 , **SCREAMING_SNAKE_CASE_ : List[str] ) -> int: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = in_channels != out_channels or stride != 1 lowercase_ = max(1 , out_channels // config.groups_width ) lowercase_ = ( TFRegNetShortCut(SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name='''shortcut''' ) if should_apply_shortcut else tf.keras.layers.Activation('''linear''' , name='''shortcut''' ) ) lowercase_ = [ TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ), TFRegNetConvLayer( SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , groups=SCREAMING_SNAKE_CASE_ , activation=config.hidden_act , name='''layer.1''' ), TFRegNetSELayer(SCREAMING_SNAKE_CASE_ , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ), TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE_ , name='''layer.3''' ), ] lowercase_ = ACTaFN[config.hidden_act] def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : Dict ) -> Optional[Any]: lowercase_ = hidden_state for layer_module in self.layers: lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ ) lowercase_ = self.shortcut(SCREAMING_SNAKE_CASE_ ) hidden_state += residual lowercase_ = self.activation(SCREAMING_SNAKE_CASE_ ) return hidden_state class lowercase__( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : str , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 2 , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> List[str]: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer lowercase_ = [ # downsampling is done in the first layer with stride of 2 layer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name='''layers.0''' ), *[layer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , name=f'''layers.{i+1}''' ) for i in range(depth - 1 )], ] def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : int ) -> int: for layer_module in self.layers: lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ ) return hidden_state class lowercase__( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : RegNetConfig , **SCREAMING_SNAKE_CASE_ : Dict ) -> Tuple: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = [] # based on `downsample_in_first_stage`, the 
first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( SCREAMING_SNAKE_CASE_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) ) lowercase_ = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(SCREAMING_SNAKE_CASE_ , config.depths[1:] ) ): self.stages.append(TFRegNetStage(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , depth=SCREAMING_SNAKE_CASE_ , name=f'''stages.{i+1}''' ) ) def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : bool = True ) -> TFBaseModelOutputWithNoAttention: lowercase_ = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: lowercase_ = hidden_states + (hidden_state,) lowercase_ = stage_module(SCREAMING_SNAKE_CASE_ ) if output_hidden_states: lowercase_ = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=SCREAMING_SNAKE_CASE_ , hidden_states=SCREAMING_SNAKE_CASE_ ) @keras_serializable class lowercase__( tf.keras.layers.Layer ): """simple docstring""" a :str = RegNetConfig def __init__( self : str , SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Any: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = config lowercase_ = TFRegNetEmbeddings(SCREAMING_SNAKE_CASE_ , name='''embedder''' ) lowercase_ = TFRegNetEncoder(SCREAMING_SNAKE_CASE_ , name='''encoder''' ) lowercase_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=SCREAMING_SNAKE_CASE_ , name='''pooler''' ) @unpack_inputs def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : bool = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention: lowercase_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict lowercase_ = self.embedder(SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ ) lowercase_ = self.encoder( SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ ) lowercase_ = encoder_outputs[0] lowercase_ = self.pooler(SCREAMING_SNAKE_CASE_ ) # Change to NCHW output format have uniformity in the modules lowercase_ = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2) ) lowercase_ = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: lowercase_ = tuple([tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=SCREAMING_SNAKE_CASE_ , pooler_output=SCREAMING_SNAKE_CASE_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class lowercase__( UpperCAmelCase ): """simple docstring""" a :Tuple = RegNetConfig a :Any = 'regnet' a :List[str] = 'pixel_values' @property def _lowercase ( self : List[str] ) -> str: return {"pixel_values": tf.TensorSpec(shape=(None, 
self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )} __a = r'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n' __a = r'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( 'The bare RegNet model outputting raw features without any specific head on top.' , UpperCAmelCase , ) class lowercase__( UpperCAmelCase ): """simple docstring""" def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : RegNetConfig , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : str ) -> List[str]: super().__init__(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) lowercase_ = TFRegNetMainLayer(SCREAMING_SNAKE_CASE_ , name='''regnet''' ) @unpack_inputs @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[Any]=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]: lowercase_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict lowercase_ = self.regnet( pixel_values=SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( '\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n ' , UpperCAmelCase , ) class lowercase__( UpperCAmelCase , UpperCAmelCase ): """simple docstring""" def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : RegNetConfig , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : int ) -> Union[str, Any]: super().__init__(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) lowercase_ = config.num_labels lowercase_ = TFRegNetMainLayer(SCREAMING_SNAKE_CASE_ , name='''regnet''' ) # classification head lowercase_ = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : tf.Tensor = None , SCREAMING_SNAKE_CASE_ : tf.Tensor = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : Dict=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: lowercase_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict lowercase_ = self.regnet( SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ ) lowercase_ = outputs.pooler_output if return_dict else outputs[1] lowercase_ = self.classifier[0](SCREAMING_SNAKE_CASE_ ) lowercase_ = self.classifier[1](SCREAMING_SNAKE_CASE_ ) lowercase_ = None if labels is None else self.hf_compute_loss(labels=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_ ) if not return_dict: lowercase_ = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_ , hidden_states=outputs.hidden_states )
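# --- Added usage sketch (an assumption, not part of the original module) ---
# The classification model above is assumed to be exposed as
# `TFRegNetForImageClassification`; the checkpoint name comes from the docstrings
# in this file, and the dummy input matches the (batch, channels, 224, 224)
# serving signature declared above.
import tensorflow as tf
from transformers import TFRegNetForImageClassification

model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
pixel_values = tf.random.uniform((1, 3, 224, 224))  # NCHW, as expected by the model
outputs = model(pixel_values)
print(outputs.logits.shape)  # (1, num_labels)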
import numpy as np


def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """Apply the ELU activation element-wise: x for x > 0, alpha * (exp(x) - 1) otherwise."""
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
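# --- Added usage sketch (an addition, not part of the original script) ---
# ELU leaves positive inputs unchanged and maps negative inputs to
# alpha * (exp(x) - 1), saturating near -alpha for very negative x.
print(exponential_linear_unit(np.array([2.3, 0.6, -2.0, -3.8]), alpha=0.3))
# approx. [ 2.3, 0.6, -0.2594, -0.2933 ]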
import importlib.metadata import warnings from copy import deepcopy from packaging import version from ..utils import logging from .import_utils import is_accelerate_available, is_bitsandbytes_available if is_bitsandbytes_available(): import bitsandbytes as bnb import torch import torch.nn as nn from ..pytorch_utils import ConvaD if is_accelerate_available(): from accelerate import init_empty_weights from accelerate.utils import find_tied_parameters __a = logging.get_logger(__name__) def a ( snake_case__: Optional[int] , snake_case__: Dict , snake_case__: int , snake_case__: List[str]=None , snake_case__: List[Any]=None ): '''simple docstring''' # Recurse if needed if "." in tensor_name: lowercase_ = tensor_name.split('''.''' ) for split in splits[:-1]: lowercase_ = getattr(snake_case__ , snake_case__ ) if new_module is None: raise ValueError(F'''{module} has no attribute {split}.''' ) lowercase_ = new_module lowercase_ = splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(F'''{module} does not have a parameter or a buffer named {tensor_name}.''' ) lowercase_ = tensor_name in module._buffers lowercase_ = getattr(snake_case__ , snake_case__ ) if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None: raise ValueError(F'''{tensor_name} is on the meta device, we need a `value` to put in on {device}.''' ) lowercase_ = False lowercase_ = False if is_buffer or not is_bitsandbytes_available(): lowercase_ = False lowercase_ = False else: lowercase_ = hasattr(bnb.nn , '''Params4bit''' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit ) lowercase_ = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams ) if is_abit or is_abit: lowercase_ = module._parameters[tensor_name] if param.device.type != "cuda": if value is None: lowercase_ = old_value.to(snake_case__ ) elif isinstance(snake_case__ , torch.Tensor ): lowercase_ = value.to('''cpu''' ) if value.dtype == torch.inta: lowercase_ = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse( '''0.37.2''' ) if not is_abit_serializable: raise ValueError( '''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. ''' '''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' ) else: lowercase_ = torch.tensor(snake_case__ , device='''cpu''' ) # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. 
if issubclass(module.source_cls , snake_case__ ) and fpaa_statistics is None: lowercase_ = new_value.T lowercase_ = old_value.__dict__ if is_abit: lowercase_ = bnb.nn.IntaParams(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ ) elif is_abit: lowercase_ = bnb.nn.Paramsabit(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ ) lowercase_ = new_value if fpaa_statistics is not None: setattr(module.weight , '''SCB''' , fpaa_statistics.to(snake_case__ ) ) else: if value is None: lowercase_ = old_value.to(snake_case__ ) elif isinstance(snake_case__ , torch.Tensor ): lowercase_ = value.to(snake_case__ ) else: lowercase_ = torch.tensor(snake_case__ , device=snake_case__ ) if is_buffer: lowercase_ = new_value else: lowercase_ = nn.Parameter(snake_case__ , requires_grad=old_value.requires_grad ) lowercase_ = new_value def a ( snake_case__: str , snake_case__: Union[str, Any]=None , snake_case__: Any=None , snake_case__: List[str]=None , snake_case__: Optional[Any]=False ): '''simple docstring''' for name, module in model.named_children(): if current_key_name is None: lowercase_ = [] current_key_name.append(snake_case__ ) if (isinstance(snake_case__ , nn.Linear ) or isinstance(snake_case__ , snake_case__ )) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` if not any(key in '''.'''.join(snake_case__ ) for key in modules_to_not_convert ): with init_empty_weights(): if isinstance(snake_case__ , snake_case__ ): lowercase_ , lowercase_ = module.weight.shape else: lowercase_ = module.in_features lowercase_ = module.out_features if quantization_config.quantization_method() == "llm_int8": lowercase_ = bnb.nn.LinearabitLt( snake_case__ , snake_case__ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , ) lowercase_ = True else: if ( quantization_config.llm_inta_skip_modules is not None and name in quantization_config.llm_inta_skip_modules ): pass else: lowercase_ = bnb.nn.Linearabit( snake_case__ , snake_case__ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , ) lowercase_ = True # Store the module class in case we need to transpose the weight later lowercase_ = type(snake_case__ ) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(snake_case__ ) if len(list(module.children() ) ) > 0: lowercase_ , lowercase_ = _replace_with_bnb_linear( snake_case__ , snake_case__ , snake_case__ , snake_case__ , has_been_replaced=snake_case__ , ) # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def a ( snake_case__: Any , snake_case__: Any=None , snake_case__: Union[str, Any]=None , snake_case__: str=None ): '''simple docstring''' lowercase_ = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert lowercase_ , lowercase_ = _replace_with_bnb_linear( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) if not has_been_replaced: logger.warning( '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.''' ''' Please double check your model architecture, or submit an issue on github if you think this is''' ''' a bug.''' ) return model def a ( *snake_case__: str , **snake_case__: Dict ): '''simple docstring''' warnings.warn( 
'''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' , snake_case__ , ) return replace_with_bnb_linear(*snake_case__ , **snake_case__ ) def a ( *snake_case__: Any , **snake_case__: List[Any] ): '''simple docstring''' warnings.warn( '''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' , snake_case__ , ) return set_module_quantized_tensor_to_device(*snake_case__ , **snake_case__ ) def a ( snake_case__: Optional[Any] ): '''simple docstring''' lowercase_ = deepcopy(snake_case__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_model.tie_weights() lowercase_ = find_tied_parameters(snake_case__ ) # For compatibility with Accelerate < 0.18 if isinstance(snake_case__ , snake_case__ ): lowercase_ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: lowercase_ = sum(snake_case__ , [] ) lowercase_ = len(snake_case__ ) > 0 # Check if it is a base model lowercase_ = not hasattr(snake_case__ , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head lowercase_ = list(model.named_children() ) lowercase_ = [list_modules[-1][0]] # add last module together with tied weights lowercase_ = set(snake_case__ ) - set(snake_case__ ) lowercase_ = list(set(snake_case__ ) ) + list(snake_case__ ) # remove ".weight" from the keys lowercase_ = ['''.weight''', '''.bias'''] lowercase_ = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: lowercase_ = name.replace(snake_case__ , '''''' ) filtered_module_names.append(snake_case__ ) return filtered_module_names
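# --- Added usage sketch (an assumption, not part of the original module) ---
# A minimal 8-bit conversion sketch. The public name `replace_with_bnb_linear`
# is taken from the deprecation message above; `BitsAndBytesConfig`, the "gpt2"
# checkpoint, and the import are assumptions, and a CUDA device with
# bitsandbytes installed is needed for the converted layers to actually run.
#
#     from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
#     model = AutoModelForCausalLM.from_pretrained("gpt2")
#     quant_config = BitsAndBytesConfig(load_in_8bit=True)
#     model = replace_with_bnb_linear(model, quantization_config=quant_config)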
import torch from torch import nn class lowercase__( nn.Module ): """simple docstring""" def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1 , SCREAMING_SNAKE_CASE_ : Optional[int]=False ) -> Any: super().__init__() lowercase_ = n_token lowercase_ = d_embed lowercase_ = d_proj lowercase_ = cutoffs + [n_token] lowercase_ = [0] + self.cutoffs lowercase_ = div_val lowercase_ = self.cutoffs[0] lowercase_ = len(self.cutoffs ) - 1 lowercase_ = self.shortlist_size + self.n_clusters if self.n_clusters > 0: lowercase_ = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) ) lowercase_ = nn.Parameter(torch.zeros(self.n_clusters ) ) lowercase_ = nn.ModuleList() lowercase_ = nn.ParameterList() if div_val == 1: for i in range(len(self.cutoffs ) ): if d_proj != d_embed: self.out_projs.append(nn.Parameter(torch.FloatTensor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) ) else: self.out_projs.append(SCREAMING_SNAKE_CASE_ ) self.out_layers.append(nn.Linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) else: for i in range(len(self.cutoffs ) ): lowercase_ , lowercase_ = self.cutoff_ends[i], self.cutoff_ends[i + 1] lowercase_ = d_embed // (div_val**i) self.out_projs.append(nn.Parameter(torch.FloatTensor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) ) self.out_layers.append(nn.Linear(SCREAMING_SNAKE_CASE_ , r_idx - l_idx ) ) lowercase_ = keep_order def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Tuple: if proj is None: lowercase_ = nn.functional.linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , bias=SCREAMING_SNAKE_CASE_ ) else: # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1: lowercase_ = nn.functional.linear(SCREAMING_SNAKE_CASE_ , proj.t().contiguous() ) lowercase_ = nn.functional.linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , bias=SCREAMING_SNAKE_CASE_ ) # else: # logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t())) # if bias is not None: # logit = logit + bias return logit def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : Optional[Any]=False ) -> Optional[Any]: if labels is not None: # Shift so that tokens < n predict n lowercase_ = hidden[..., :-1, :].contiguous() lowercase_ = labels[..., 1:].contiguous() lowercase_ = hidden.view(-1 , hidden.size(-1 ) ) lowercase_ = labels.view(-1 ) if hidden.size(0 ) != labels.size(0 ): raise RuntimeError('''Input and labels should have the same size in the batch dimension.''' ) else: lowercase_ = hidden.view(-1 , hidden.size(-1 ) ) if self.n_clusters == 0: lowercase_ = self._compute_logit(SCREAMING_SNAKE_CASE_ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] ) if labels is not None: lowercase_ = labels != -1_0_0 lowercase_ = torch.zeros_like(SCREAMING_SNAKE_CASE_ , dtype=hidden.dtype , device=hidden.device ) lowercase_ = ( -nn.functional.log_softmax(SCREAMING_SNAKE_CASE_ , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 ) ) else: lowercase_ = nn.functional.log_softmax(SCREAMING_SNAKE_CASE_ , dim=-1 ) else: # construct weights and biases lowercase_ , lowercase_ = [], [] for i in range(len(self.cutoffs ) ): if self.div_val == 1: lowercase_ , lowercase_ = self.cutoff_ends[i], self.cutoff_ends[i + 1] lowercase_ = 
self.out_layers[0].weight[l_idx:r_idx] lowercase_ = self.out_layers[0].bias[l_idx:r_idx] else: lowercase_ = self.out_layers[i].weight lowercase_ = self.out_layers[i].bias if i == 0: lowercase_ = torch.cat([weight_i, self.cluster_weight] , dim=0 ) lowercase_ = torch.cat([bias_i, self.cluster_bias] , dim=0 ) weights.append(SCREAMING_SNAKE_CASE_ ) biases.append(SCREAMING_SNAKE_CASE_ ) lowercase_ , lowercase_ , lowercase_ = weights[0], biases[0], self.out_projs[0] lowercase_ = self._compute_logit(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowercase_ = nn.functional.log_softmax(SCREAMING_SNAKE_CASE_ , dim=1 ) if labels is None: lowercase_ = hidden.new_empty((head_logit.size(0 ), self.n_token) ) else: lowercase_ = torch.zeros_like(SCREAMING_SNAKE_CASE_ , dtype=hidden.dtype , device=hidden.device ) lowercase_ = 0 lowercase_ = [0] + self.cutoffs for i in range(len(SCREAMING_SNAKE_CASE_ ) - 1 ): lowercase_ , lowercase_ = cutoff_values[i], cutoff_values[i + 1] if labels is not None: lowercase_ = (labels >= l_idx) & (labels < r_idx) lowercase_ = mask_i.nonzero().squeeze() if indices_i.numel() == 0: continue lowercase_ = labels.index_select(0 , SCREAMING_SNAKE_CASE_ ) - l_idx lowercase_ = head_logprob.index_select(0 , SCREAMING_SNAKE_CASE_ ) lowercase_ = hidden.index_select(0 , SCREAMING_SNAKE_CASE_ ) else: lowercase_ = hidden if i == 0: if labels is not None: lowercase_ = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 ) else: lowercase_ = head_logprob[:, : self.cutoffs[0]] else: lowercase_ , lowercase_ , lowercase_ = weights[i], biases[i], self.out_projs[i] lowercase_ = self._compute_logit(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowercase_ = nn.functional.log_softmax(SCREAMING_SNAKE_CASE_ , dim=1 ) lowercase_ = self.cutoffs[0] + i - 1 # No probability for the head cluster if labels is not None: lowercase_ = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather( 1 , target_i[:, None] ).squeeze(1 ) else: lowercase_ = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i lowercase_ = logprob_i if labels is not None: if (hasattr(self , '''keep_order''' ) and self.keep_order) or keep_order: out.index_copy_(0 , SCREAMING_SNAKE_CASE_ , -logprob_i ) else: out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i ) offset += logprob_i.size(0 ) return out def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] ) -> Optional[int]: if self.n_clusters == 0: lowercase_ = self._compute_logit(SCREAMING_SNAKE_CASE_ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] ) return nn.functional.log_softmax(SCREAMING_SNAKE_CASE_ , dim=-1 ) else: # construct weights and biases lowercase_ , lowercase_ = [], [] for i in range(len(self.cutoffs ) ): if self.div_val == 1: lowercase_ , lowercase_ = self.cutoff_ends[i], self.cutoff_ends[i + 1] lowercase_ = self.out_layers[0].weight[l_idx:r_idx] lowercase_ = self.out_layers[0].bias[l_idx:r_idx] else: lowercase_ = self.out_layers[i].weight lowercase_ = self.out_layers[i].bias if i == 0: lowercase_ = torch.cat([weight_i, self.cluster_weight] , dim=0 ) lowercase_ = torch.cat([bias_i, self.cluster_bias] , dim=0 ) weights.append(SCREAMING_SNAKE_CASE_ ) biases.append(SCREAMING_SNAKE_CASE_ ) lowercase_ , lowercase_ , lowercase_ = weights[0], biases[0], self.out_projs[0] lowercase_ = self._compute_logit(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowercase_ = 
hidden.new_empty((head_logit.size(0 ), self.n_token) ) lowercase_ = nn.functional.log_softmax(SCREAMING_SNAKE_CASE_ , dim=1 ) lowercase_ = [0] + self.cutoffs for i in range(len(SCREAMING_SNAKE_CASE_ ) - 1 ): lowercase_ , lowercase_ = cutoff_values[i], cutoff_values[i + 1] if i == 0: lowercase_ = head_logprob[:, : self.cutoffs[0]] else: lowercase_ , lowercase_ , lowercase_ = weights[i], biases[i], self.out_projs[i] lowercase_ = self._compute_logit(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowercase_ = nn.functional.log_softmax(SCREAMING_SNAKE_CASE_ , dim=1 ) lowercase_ = head_logprob[:, -i] + tail_logprob_i lowercase_ = logprob_i return out
import argparse import os import re __a = 'src/transformers/models/auto' # re pattern that matches mapping introductions: # SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict __a = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict') # re pattern that matches identifiers in mappings __a = re.compile(r'\s*\(\s*"(\S[^"]+)"') def a ( snake_case__: str , snake_case__: bool = False ): '''simple docstring''' with open(snake_case__ , '''r''' , encoding='''utf-8''' ) as f: lowercase_ = f.read() lowercase_ = content.split('''\n''' ) lowercase_ = [] lowercase_ = 0 while line_idx < len(snake_case__ ): if _re_intro_mapping.search(lines[line_idx] ) is not None: lowercase_ = len(re.search(r'''^(\s*)\S''' , lines[line_idx] ).groups()[0] ) + 8 # Start of a new mapping! while not lines[line_idx].startswith(''' ''' * indent + '''(''' ): new_lines.append(lines[line_idx] ) line_idx += 1 lowercase_ = [] while lines[line_idx].strip() != "]": # Blocks either fit in one line or not if lines[line_idx].strip() == "(": lowercase_ = line_idx while not lines[line_idx].startswith(''' ''' * indent + ''')''' ): line_idx += 1 blocks.append('''\n'''.join(lines[start_idx : line_idx + 1] ) ) else: blocks.append(lines[line_idx] ) line_idx += 1 # Sort blocks by their identifiers lowercase_ = sorted(snake_case__ , key=lambda snake_case__ : _re_identifier.search(snake_case__ ).groups()[0] ) new_lines += blocks else: new_lines.append(lines[line_idx] ) line_idx += 1 if overwrite: with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f: f.write('''\n'''.join(snake_case__ ) ) elif "\n".join(snake_case__ ) != content: return True def a ( snake_case__: bool = False ): '''simple docstring''' lowercase_ = [os.path.join(snake_case__ , snake_case__ ) for f in os.listdir(snake_case__ ) if f.endswith('''.py''' )] lowercase_ = [sort_auto_mapping(snake_case__ , overwrite=snake_case__ ) for fname in fnames] if not overwrite and any(snake_case__ ): lowercase_ = [f for f, d in zip(snake_case__ , snake_case__ ) if d] raise ValueError( F'''The following files have auto mappings that need sorting: {', '.join(snake_case__ )}. Run `make style` to fix''' ''' this.''' ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.') __a = parser.parse_args() sort_all_auto_mappings(not args.check_only)
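# --- Added usage note (an addition, not part of the original script) ---
# Run from the repository root; the utils/ path below is an assumption about
# where this file lives.
#
#     python utils/sort_auto_mappings.py               # re-sort the auto mappings in place
#     python utils/sort_auto_mappings.py --check_only  # only report unsorted mappings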
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __a = { 'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'], 'feature_extraction_whisper': ['WhisperFeatureExtractor'], 'processing_whisper': ['WhisperProcessor'], 'tokenization_whisper': ['WhisperTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['WhisperTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST', 'WhisperForConditionalGeneration', 'WhisperModel', 'WhisperPreTrainedModel', 'WhisperForAudioClassification', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFWhisperForConditionalGeneration', 'TFWhisperModel', 'TFWhisperPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'FlaxWhisperForConditionalGeneration', 'FlaxWhisperModel', 'FlaxWhisperPreTrainedModel', 'FlaxWhisperForAudioClassification', ] if TYPE_CHECKING: from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig from .feature_extraction_whisper import WhisperFeatureExtractor from .processing_whisper import WhisperProcessor from .tokenization_whisper import WhisperTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_whisper_fast import WhisperTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_whisper import ( WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, WhisperForAudioClassification, WhisperForConditionalGeneration, WhisperModel, WhisperPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_whisper import ( TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, TFWhisperForConditionalGeneration, TFWhisperModel, TFWhisperPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_whisper import ( FlaxWhisperForAudioClassification, FlaxWhisperForConditionalGeneration, FlaxWhisperModel, FlaxWhisperPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
30
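# Consumer-side sketch (illustrative, not part of the __init__ module above): accessing any of the
# lazily exported names triggers the _LazyModule machinery, so the usual public API still works.
# Assumes torch is installed and that the "openai/whisper-tiny" checkpoint is reachable.
from transformers import WhisperForConditionalGeneration, WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny")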
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """Recursive 0/1 knapsack: best value achievable from items[index:] within max_weight."""
    if index == number_of_items:
        return 0
    # value obtained when the current item is skipped
    ans_without_item = knapsack(weights, values, number_of_items, max_weight, index + 1)
    # value obtained when the current item is taken (only possible if it still fits)
    ans_with_item = 0
    if weights[index] <= max_weight:
        ans_with_item = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans_without_item, ans_with_item)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
30
1
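# Illustrative call of the recursive helper above, assuming it is exposed as
# knapsack(weights, values, number_of_items, max_weight, index) as in its own recursive calls.
weights = [1, 3, 4, 5]
values = [1, 4, 5, 7]
best_value = knapsack(weights, values, len(weights), 7, 0)
print(best_value)  # 9 -> take the items with weights 3 and 4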
# Lint as: python3 # pylint: enable=line-too-long # pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position __a = '2.13.1' import platform import pyarrow from packaging import version if version.parse(platform.python_version()) < version.parse('3.7'): raise ImportWarning( 'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.' ) if version.parse(pyarrow.__version__).major < 8: raise ImportWarning( 'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n' 'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.' ) del platform del pyarrow del version from .arrow_dataset import Dataset from .arrow_reader import ReadInstruction from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .combine import concatenate_datasets, interleave_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .download import * from .features import * from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled from .info import DatasetInfo, MetricInfo from .inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, list_datasets, list_metrics, ) from .iterable_dataset import IterableDataset from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric from .metric import Metric from .splits import ( NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent, ) from .tasks import * from .utils import * from .utils import logging # deprecated modules from datasets import arrow_dataset as _arrow_dataset # isort:skip from datasets import utils as _utils # isort:skip from datasets.utils import download_manager as _deprecated_download_manager # isort:skip __a = concatenate_datasets __a = DownloadConfig __a = DownloadManager __a = DownloadMode __a = DownloadConfig __a = DownloadMode __a = DownloadManager del _arrow_dataset, _utils, _deprecated_download_manager
30
import argparse from collections import defaultdict import yaml __a = 'docs/source/en/_toctree.yml' def a ( snake_case__: Dict ): '''simple docstring''' lowercase_ = defaultdict(snake_case__ ) for doc in model_doc: counts[doc["local"]] += 1 lowercase_ = [key for key, value in counts.items() if value > 1] lowercase_ = [] for duplicate_key in duplicates: lowercase_ = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} ) if len(snake_case__ ) > 1: raise ValueError( F'''{duplicate_key} is present several times in the documentation table of content at ''' '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the ''' '''others.''' ) # Only add this once new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] ) # Sort return sorted(snake_case__ , key=lambda snake_case__ : s["title"].lower() ) def a ( snake_case__: List[Any]=False ): '''simple docstring''' with open(snake_case__ , encoding='''utf-8''' ) as f: lowercase_ = yaml.safe_load(f.read() ) # Get to the API doc lowercase_ = 0 while content[api_idx]["title"] != "API": api_idx += 1 lowercase_ = content[api_idx]['''sections'''] # Then to the model doc lowercase_ = 0 while api_doc[model_idx]["title"] != "Models": model_idx += 1 lowercase_ = api_doc[model_idx]['''sections'''] lowercase_ = [(idx, section) for idx, section in enumerate(snake_case__ ) if '''sections''' in section] lowercase_ = False for idx, modality_doc in modalities_docs: lowercase_ = modality_doc['''sections'''] lowercase_ = clean_model_doc_toc(snake_case__ ) if old_modality_doc != new_modality_doc: lowercase_ = True if overwrite: lowercase_ = new_modality_doc if diff: if overwrite: lowercase_ = model_doc lowercase_ = api_doc with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f: f.write(yaml.dump(snake_case__ , allow_unicode=snake_case__ ) ) else: raise ValueError( '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') __a = parser.parse_args() check_model_doc(args.fix_and_overwrite)
30
1
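# Illustrative shape of the model-doc entries the cleaner above operates on (hypothetical titles and
# paths, not copied from the repository). The simplified one-liner below only mimics the intended
# result: one entry per `local` key, sorted by lower-cased title.
model_doc = [
    {"local": "model_doc/bert", "title": "BERT"},
    {"local": "model_doc/albert", "title": "ALBERT"},
    {"local": "model_doc/bert", "title": "BERT"},  # duplicate that should be collapsed
]
cleaned = sorted({doc["local"]: doc for doc in model_doc}.values(), key=lambda doc: doc["title"].lower())
print([doc["title"] for doc in cleaned])  # ['ALBERT', 'BERT']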
from datetime import datetime

import matplotlib.pyplot as plt
import torch


def freeze_params(module: torch.nn.Module) -> None:
    """Disable gradient updates for every parameter of the given module."""
    for param in module.parameters():
        param.requires_grad = False


def get_device() -> str:
    """Pick the best available device, warning about the current MPS limitations."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image) -> None:
    """Display an image without axis ticks."""
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp() -> str:
    """Return the current time formatted as HH:MM:SS."""
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
30
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING __a = logging.get_logger(__name__) class lowercase__( UpperCAmelCase ): """simple docstring""" a :Union[str, Any] = 'upernet' def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : Dict=None , SCREAMING_SNAKE_CASE_ : str=5_1_2 , SCREAMING_SNAKE_CASE_ : Tuple=0.02 , SCREAMING_SNAKE_CASE_ : Optional[Any]=[1, 2, 3, 6] , SCREAMING_SNAKE_CASE_ : Optional[int]=True , SCREAMING_SNAKE_CASE_ : Tuple=0.4 , SCREAMING_SNAKE_CASE_ : Optional[int]=3_8_4 , SCREAMING_SNAKE_CASE_ : str=2_5_6 , SCREAMING_SNAKE_CASE_ : Dict=1 , SCREAMING_SNAKE_CASE_ : Tuple=False , SCREAMING_SNAKE_CASE_ : str=2_5_5 , **SCREAMING_SNAKE_CASE_ : str , ) -> int: super().__init__(**SCREAMING_SNAKE_CASE_ ) if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) lowercase_ = CONFIG_MAPPING['''resnet'''](out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] ) elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase_ = backbone_config.get('''model_type''' ) lowercase_ = CONFIG_MAPPING[backbone_model_type] lowercase_ = config_class.from_dict(SCREAMING_SNAKE_CASE_ ) lowercase_ = backbone_config lowercase_ = hidden_size lowercase_ = initializer_range lowercase_ = pool_scales lowercase_ = use_auxiliary_head lowercase_ = auxiliary_loss_weight lowercase_ = auxiliary_in_channels lowercase_ = auxiliary_channels lowercase_ = auxiliary_num_convs lowercase_ = auxiliary_concat_input lowercase_ = loss_ignore_index def _lowercase ( self : List[str] ) -> List[str]: lowercase_ = copy.deepcopy(self.__dict__ ) lowercase_ = self.backbone_config.to_dict() lowercase_ = self.__class__.model_type return output
30
1
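# Hedged usage sketch for a config like the one above, assuming the standard transformers classes
# (UperNetConfig, UperNetForSemanticSegmentation) and a ConvNeXt backbone; adjust the names if the
# surrounding package exposes them differently.
from transformers import ConvNextConfig, UperNetConfig, UperNetForSemanticSegmentation

backbone_config = ConvNextConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
config = UperNetConfig(backbone_config=backbone_config, auxiliary_in_channels=384)
model = UperNetForSemanticSegmentation(config)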
from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __a = { 'configuration_informer': [ 'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'InformerConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'InformerForPrediction', 'InformerModel', 'InformerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_informer import ( INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, InformerForPrediction, InformerModel, InformerPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
30
import tempfile import unittest from make_student import create_student_by_copying_alternating_layers from transformers import AutoConfig from transformers.file_utils import cached_property from transformers.testing_utils import require_torch __a = 'sshleifer/bart-tiny-random' __a = 'patrickvonplaten/t5-tiny-random' @require_torch class lowercase__( unittest.TestCase ): """simple docstring""" @cached_property def _lowercase ( self : Any ) -> Tuple: return AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : int ) -> List[Any]: lowercase_ , *lowercase_ = create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.num_hidden_layers , 1 ) def _lowercase ( self : Union[str, Any] ) -> Optional[int]: lowercase_ , *lowercase_ = create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=1 , d=SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : List[str] ) -> List[Any]: lowercase_ , *lowercase_ = create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=1 , d=SCREAMING_SNAKE_CASE_ ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers ) def _lowercase ( self : List[Any] ) -> List[Any]: lowercase_ , *lowercase_ = create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , 1 ) def _lowercase ( self : Union[str, Any] ) -> Optional[int]: with self.assertRaises(SCREAMING_SNAKE_CASE_ ): create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=SCREAMING_SNAKE_CASE_ , d=SCREAMING_SNAKE_CASE_ )
30
1
import math from enum import Enum from typing import Optional, Union from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR from .utils import logging __a = logging.get_logger(__name__) class lowercase__( UpperCAmelCase ): """simple docstring""" a :Union[str, Any] = 'linear' a :Union[str, Any] = 'cosine' a :List[str] = 'cosine_with_restarts' a :Dict = 'polynomial' a :Tuple = 'constant' a :int = 'constant_with_warmup' a :Union[str, Any] = 'piecewise_constant' def a ( snake_case__: Optimizer , snake_case__: int = -1 ): '''simple docstring''' return LambdaLR(snake_case__ , lambda snake_case__ : 1 , last_epoch=snake_case__ ) def a ( snake_case__: Optimizer , snake_case__: int , snake_case__: int = -1 ): '''simple docstring''' def lr_lambda(snake_case__: int ): if current_step < num_warmup_steps: return float(snake_case__ ) / float(max(1.0 , snake_case__ ) ) return 1.0 return LambdaLR(snake_case__ , snake_case__ , last_epoch=snake_case__ ) def a ( snake_case__: Optimizer , snake_case__: str , snake_case__: int = -1 ): '''simple docstring''' lowercase_ = {} lowercase_ = step_rules.split(''',''' ) for rule_str in rule_list[:-1]: lowercase_ , lowercase_ = rule_str.split(''':''' ) lowercase_ = int(snake_case__ ) lowercase_ = float(snake_case__ ) lowercase_ = value lowercase_ = float(rule_list[-1] ) def create_rules_function(snake_case__: Optional[int] , snake_case__: int ): def rule_func(snake_case__: int ) -> float: lowercase_ = sorted(rules_dict.keys() ) for i, sorted_step in enumerate(snake_case__ ): if steps < sorted_step: return rules_dict[sorted_steps[i]] return last_lr_multiple return rule_func lowercase_ = create_rules_function(snake_case__ , snake_case__ ) return LambdaLR(snake_case__ , snake_case__ , last_epoch=snake_case__ ) def a ( snake_case__: List[str] , snake_case__: List[Any] , snake_case__: Dict , snake_case__: int=-1 ): '''simple docstring''' def lr_lambda(snake_case__: int ): if current_step < num_warmup_steps: return float(snake_case__ ) / float(max(1 , snake_case__ ) ) return max( 0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) ) return LambdaLR(snake_case__ , snake_case__ , snake_case__ ) def a ( snake_case__: Optimizer , snake_case__: int , snake_case__: int , snake_case__: float = 0.5 , snake_case__: int = -1 ): '''simple docstring''' def lr_lambda(snake_case__: List[Any] ): if current_step < num_warmup_steps: return float(snake_case__ ) / float(max(1 , snake_case__ ) ) lowercase_ = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(snake_case__ ) * 2.0 * progress )) ) return LambdaLR(snake_case__ , snake_case__ , snake_case__ ) def a ( snake_case__: Optimizer , snake_case__: int , snake_case__: int , snake_case__: int = 1 , snake_case__: int = -1 ): '''simple docstring''' def lr_lambda(snake_case__: Any ): if current_step < num_warmup_steps: return float(snake_case__ ) / float(max(1 , snake_case__ ) ) lowercase_ = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) if progress >= 1.0: return 0.0 return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(snake_case__ ) * progress) % 1.0) )) ) return LambdaLR(snake_case__ , snake_case__ , snake_case__ ) def a ( snake_case__: Dict , snake_case__: Dict , snake_case__: List[str] , snake_case__: Union[str, Any]=1e-7 , snake_case__: Tuple=1.0 , snake_case__: Optional[Any]=-1 ): '''simple docstring''' 
lowercase_ = optimizer.defaults['''lr'''] if not (lr_init > lr_end): raise ValueError(F'''lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})''' ) def lr_lambda(snake_case__: int ): if current_step < num_warmup_steps: return float(snake_case__ ) / float(max(1 , snake_case__ ) ) elif current_step > num_training_steps: return lr_end / lr_init # as LambdaLR multiplies by lr_init else: lowercase_ = lr_init - lr_end lowercase_ = num_training_steps - num_warmup_steps lowercase_ = 1 - (current_step - num_warmup_steps) / decay_steps lowercase_ = lr_range * pct_remaining**power + lr_end return decay / lr_init # as LambdaLR multiplies by lr_init return LambdaLR(snake_case__ , snake_case__ , snake_case__ ) __a = { SchedulerType.LINEAR: get_linear_schedule_with_warmup, SchedulerType.COSINE: get_cosine_schedule_with_warmup, SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup, SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup, SchedulerType.CONSTANT: get_constant_schedule, SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup, SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule, } def a ( snake_case__: Union[str, SchedulerType] , snake_case__: Optimizer , snake_case__: Optional[str] = None , snake_case__: Optional[int] = None , snake_case__: Optional[int] = None , snake_case__: int = 1 , snake_case__: float = 1.0 , snake_case__: int = -1 , ): '''simple docstring''' lowercase_ = SchedulerType(snake_case__ ) lowercase_ = TYPE_TO_SCHEDULER_FUNCTION[name] if name == SchedulerType.CONSTANT: return schedule_func(snake_case__ , last_epoch=snake_case__ ) if name == SchedulerType.PIECEWISE_CONSTANT: return schedule_func(snake_case__ , step_rules=snake_case__ , last_epoch=snake_case__ ) # All other schedulers require `num_warmup_steps` if num_warmup_steps is None: raise ValueError(F'''{name} requires `num_warmup_steps`, please provide that argument.''' ) if name == SchedulerType.CONSTANT_WITH_WARMUP: return schedule_func(snake_case__ , num_warmup_steps=snake_case__ , last_epoch=snake_case__ ) # All other schedulers require `num_training_steps` if num_training_steps is None: raise ValueError(F'''{name} requires `num_training_steps`, please provide that argument.''' ) if name == SchedulerType.COSINE_WITH_RESTARTS: return schedule_func( snake_case__ , num_warmup_steps=snake_case__ , num_training_steps=snake_case__ , num_cycles=snake_case__ , last_epoch=snake_case__ , ) if name == SchedulerType.POLYNOMIAL: return schedule_func( snake_case__ , num_warmup_steps=snake_case__ , num_training_steps=snake_case__ , power=snake_case__ , last_epoch=snake_case__ , ) return schedule_func( snake_case__ , num_warmup_steps=snake_case__ , num_training_steps=snake_case__ , last_epoch=snake_case__ )
30
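# Minimal usage sketch for the schedule factory defined above, assuming it is exported as
# get_scheduler (the name used in diffusers.optimization); swap in the local name if it differs.
import torch
from diffusers.optimization import get_scheduler

model = torch.nn.Linear(4, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
lr_scheduler = get_scheduler(
    "cosine", optimizer=optimizer, num_warmup_steps=100, num_training_steps=1_000
)
for _ in range(1_000):
    optimizer.step()
    lr_scheduler.step()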
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the squares of the first n natural numbers."""
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
30
1
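# Quick sanity check of the closed-form version above using the classic n = 10 example:
# square of the sum = 55**2 = 3025, sum of the squares = 385, difference = 2640.
assert (10 * 11 // 2) ** 2 == 3025
assert 10 * 11 * 21 // 6 == 385
assert 3025 - 385 == 2640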
import unittest from accelerate import debug_launcher from accelerate.test_utils import require_cpu, test_ops, test_script @require_cpu class lowercase__( unittest.TestCase ): """simple docstring""" def _lowercase ( self : Any ) -> Union[str, Any]: debug_launcher(test_script.main ) def _lowercase ( self : List[Any] ) -> Tuple: debug_launcher(test_ops.main )
30
import logging from transformers.configuration_utils import PretrainedConfig __a = logging.getLogger(__name__) class lowercase__( UpperCAmelCase ): """simple docstring""" a :Optional[int] = 'masked_bert' def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any]=3_0_5_2_2 , SCREAMING_SNAKE_CASE_ : List[str]=7_6_8 , SCREAMING_SNAKE_CASE_ : Optional[int]=1_2 , SCREAMING_SNAKE_CASE_ : Any=1_2 , SCREAMING_SNAKE_CASE_ : str=3_0_7_2 , SCREAMING_SNAKE_CASE_ : Union[str, Any]="gelu" , SCREAMING_SNAKE_CASE_ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE_ : List[str]=0.1 , SCREAMING_SNAKE_CASE_ : Tuple=5_1_2 , SCREAMING_SNAKE_CASE_ : str=2 , SCREAMING_SNAKE_CASE_ : Dict=0.02 , SCREAMING_SNAKE_CASE_ : Any=1e-12 , SCREAMING_SNAKE_CASE_ : Any=0 , SCREAMING_SNAKE_CASE_ : Optional[int]="topK" , SCREAMING_SNAKE_CASE_ : Dict="constant" , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.0 , **SCREAMING_SNAKE_CASE_ : Optional[int] , ) -> Optional[Any]: super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) lowercase_ = vocab_size lowercase_ = hidden_size lowercase_ = num_hidden_layers lowercase_ = num_attention_heads lowercase_ = hidden_act lowercase_ = intermediate_size lowercase_ = hidden_dropout_prob lowercase_ = attention_probs_dropout_prob lowercase_ = max_position_embeddings lowercase_ = type_vocab_size lowercase_ = initializer_range lowercase_ = layer_norm_eps lowercase_ = pruning_method lowercase_ = mask_init lowercase_ = mask_scale
30
1
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING __a = logging.get_logger(__name__) class lowercase__( UpperCAmelCase ): """simple docstring""" a :Union[str, Any] = 'upernet' def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : Dict=None , SCREAMING_SNAKE_CASE_ : str=5_1_2 , SCREAMING_SNAKE_CASE_ : Tuple=0.02 , SCREAMING_SNAKE_CASE_ : Optional[Any]=[1, 2, 3, 6] , SCREAMING_SNAKE_CASE_ : Optional[int]=True , SCREAMING_SNAKE_CASE_ : Tuple=0.4 , SCREAMING_SNAKE_CASE_ : Optional[int]=3_8_4 , SCREAMING_SNAKE_CASE_ : str=2_5_6 , SCREAMING_SNAKE_CASE_ : Dict=1 , SCREAMING_SNAKE_CASE_ : Tuple=False , SCREAMING_SNAKE_CASE_ : str=2_5_5 , **SCREAMING_SNAKE_CASE_ : str , ) -> int: super().__init__(**SCREAMING_SNAKE_CASE_ ) if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) lowercase_ = CONFIG_MAPPING['''resnet'''](out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] ) elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase_ = backbone_config.get('''model_type''' ) lowercase_ = CONFIG_MAPPING[backbone_model_type] lowercase_ = config_class.from_dict(SCREAMING_SNAKE_CASE_ ) lowercase_ = backbone_config lowercase_ = hidden_size lowercase_ = initializer_range lowercase_ = pool_scales lowercase_ = use_auxiliary_head lowercase_ = auxiliary_loss_weight lowercase_ = auxiliary_in_channels lowercase_ = auxiliary_channels lowercase_ = auxiliary_num_convs lowercase_ = auxiliary_concat_input lowercase_ = loss_ignore_index def _lowercase ( self : List[str] ) -> List[str]: lowercase_ = copy.deepcopy(self.__dict__ ) lowercase_ = self.backbone_config.to_dict() lowercase_ = self.__class__.model_type return output
30
import os


def solution() -> str:
    """Return the first ten digits of the sum of the numbers listed in num.txt (located next to this script)."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
30
1
from dataclasses import dataclass from typing import Dict, Optional, Union import torch import torch.nn.functional as F from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .attention_processor import AttentionProcessor, AttnProcessor from .embeddings import TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin @dataclass class lowercase__( UpperCAmelCase ): """simple docstring""" a :torch.FloatTensor class lowercase__( UpperCAmelCase , UpperCAmelCase ): """simple docstring""" @register_to_config def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int = 3_2 , SCREAMING_SNAKE_CASE_ : int = 6_4 , SCREAMING_SNAKE_CASE_ : int = 2_0 , SCREAMING_SNAKE_CASE_ : int = 7_6_8 , SCREAMING_SNAKE_CASE_ : Tuple=7_7 , SCREAMING_SNAKE_CASE_ : Tuple=4 , SCREAMING_SNAKE_CASE_ : float = 0.0 , SCREAMING_SNAKE_CASE_ : str = "silu" , SCREAMING_SNAKE_CASE_ : Optional[str] = None , SCREAMING_SNAKE_CASE_ : Optional[str] = None , SCREAMING_SNAKE_CASE_ : Optional[str] = "linear" , SCREAMING_SNAKE_CASE_ : Optional[str] = "prd" , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : Optional[int] = None , ) -> Tuple: super().__init__() lowercase_ = num_attention_heads lowercase_ = attention_head_dim lowercase_ = num_attention_heads * attention_head_dim lowercase_ = additional_embeddings lowercase_ = time_embed_dim or inner_dim lowercase_ = embedding_proj_dim or embedding_dim lowercase_ = clip_embed_dim or embedding_dim lowercase_ = Timesteps(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 0 ) lowercase_ = TimestepEmbedding(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , out_dim=SCREAMING_SNAKE_CASE_ , act_fn=SCREAMING_SNAKE_CASE_ ) lowercase_ = nn.Linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if embedding_proj_norm_type is None: lowercase_ = None elif embedding_proj_norm_type == "layer": lowercase_ = nn.LayerNorm(SCREAMING_SNAKE_CASE_ ) else: raise ValueError(f'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' ) lowercase_ = nn.Linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if encoder_hid_proj_type is None: lowercase_ = None elif encoder_hid_proj_type == "linear": lowercase_ = nn.Linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else: raise ValueError(f'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' ) lowercase_ = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , SCREAMING_SNAKE_CASE_ ) ) if added_emb_type == "prd": lowercase_ = nn.Parameter(torch.zeros(1 , 1 , SCREAMING_SNAKE_CASE_ ) ) elif added_emb_type is None: lowercase_ = None else: raise ValueError( f'''`added_emb_type`: {added_emb_type} is not supported. 
Make sure to choose one of `\'prd\'` or `None`.''' ) lowercase_ = nn.ModuleList( [ BasicTransformerBlock( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , dropout=SCREAMING_SNAKE_CASE_ , activation_fn='''gelu''' , attention_bias=SCREAMING_SNAKE_CASE_ , ) for d in range(SCREAMING_SNAKE_CASE_ ) ] ) if norm_in_type == "layer": lowercase_ = nn.LayerNorm(SCREAMING_SNAKE_CASE_ ) elif norm_in_type is None: lowercase_ = None else: raise ValueError(f'''Unsupported norm_in_type: {norm_in_type}.''' ) lowercase_ = nn.LayerNorm(SCREAMING_SNAKE_CASE_ ) lowercase_ = nn.Linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowercase_ = torch.full( [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0 ) causal_attention_mask.triu_(1 ) lowercase_ = causal_attention_mask[None, ...] self.register_buffer('''causal_attention_mask''' , SCREAMING_SNAKE_CASE_ , persistent=SCREAMING_SNAKE_CASE_ ) lowercase_ = nn.Parameter(torch.zeros(1 , SCREAMING_SNAKE_CASE_ ) ) lowercase_ = nn.Parameter(torch.zeros(1 , SCREAMING_SNAKE_CASE_ ) ) @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def _lowercase ( self : Dict ) -> Dict[str, AttentionProcessor]: lowercase_ = {} def fn_recursive_add_processors(SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : torch.nn.Module , SCREAMING_SNAKE_CASE_ : Dict[str, AttentionProcessor] ): if hasattr(SCREAMING_SNAKE_CASE_ , '''set_processor''' ): lowercase_ = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(f'''{name}.{sub_name}''' , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return processors for name, module in self.named_children(): fn_recursive_add_processors(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return processors def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) -> int: lowercase_ = len(self.attn_processors.keys() ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and len(SCREAMING_SNAKE_CASE_ ) != count: raise ValueError( f'''A dict of processors was passed, but the number of processors {len(SCREAMING_SNAKE_CASE_ )} does not match the''' f''' number of attention layers: {count}. 
Please make sure to pass {count} processor classes.''' ) def fn_recursive_attn_processor(SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : torch.nn.Module , SCREAMING_SNAKE_CASE_ : int ): if hasattr(SCREAMING_SNAKE_CASE_ , '''set_processor''' ): if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): module.set_processor(SCREAMING_SNAKE_CASE_ ) else: module.set_processor(processor.pop(f'''{name}.processor''' ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f'''{name}.{sub_name}''' , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for name, module in self.named_children(): fn_recursive_attn_processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Any ) -> Optional[int]: self.set_attn_processor(AttnProcessor() ) def _lowercase ( self : Any , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Union[torch.Tensor, float, int] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE_ : Optional[torch.BoolTensor] = None , SCREAMING_SNAKE_CASE_ : bool = True , ) -> List[Any]: lowercase_ = hidden_states.shape[0] lowercase_ = timestep if not torch.is_tensor(SCREAMING_SNAKE_CASE_ ): lowercase_ = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device ) elif torch.is_tensor(SCREAMING_SNAKE_CASE_ ) and len(timesteps.shape ) == 0: lowercase_ = timesteps[None].to(hidden_states.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML lowercase_ = timesteps * torch.ones(SCREAMING_SNAKE_CASE_ , dtype=timesteps.dtype , device=timesteps.device ) lowercase_ = self.time_proj(SCREAMING_SNAKE_CASE_ ) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might be fp16, so we need to cast here. 
lowercase_ = timesteps_projected.to(dtype=self.dtype ) lowercase_ = self.time_embedding(SCREAMING_SNAKE_CASE_ ) if self.embedding_proj_norm is not None: lowercase_ = self.embedding_proj_norm(SCREAMING_SNAKE_CASE_ ) lowercase_ = self.embedding_proj(SCREAMING_SNAKE_CASE_ ) if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None: lowercase_ = self.encoder_hidden_states_proj(SCREAMING_SNAKE_CASE_ ) elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None: raise ValueError('''`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set''' ) lowercase_ = self.proj_in(SCREAMING_SNAKE_CASE_ ) lowercase_ = self.positional_embedding.to(hidden_states.dtype ) lowercase_ = [] lowercase_ = 0 if encoder_hidden_states is not None: additional_embeds.append(SCREAMING_SNAKE_CASE_ ) additional_embeddings_len += encoder_hidden_states.shape[1] if len(proj_embeddings.shape ) == 2: lowercase_ = proj_embeddings[:, None, :] if len(hidden_states.shape ) == 2: lowercase_ = hidden_states[:, None, :] lowercase_ = additional_embeds + [ proj_embeddings, time_embeddings[:, None, :], hidden_states, ] if self.prd_embedding is not None: lowercase_ = self.prd_embedding.to(hidden_states.dtype ).expand(SCREAMING_SNAKE_CASE_ , -1 , -1 ) additional_embeds.append(SCREAMING_SNAKE_CASE_ ) lowercase_ = torch.cat( SCREAMING_SNAKE_CASE_ , dim=1 , ) # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens lowercase_ = additional_embeddings_len + proj_embeddings.shape[1] + 1 if positional_embeddings.shape[1] < hidden_states.shape[1]: lowercase_ = F.pad( SCREAMING_SNAKE_CASE_ , ( 0, 0, additional_embeddings_len, self.prd_embedding.shape[1] if self.prd_embedding is not None else 0, ) , value=0.0 , ) lowercase_ = hidden_states + positional_embeddings if attention_mask is not None: lowercase_ = (1 - attention_mask.to(hidden_states.dtype )) * -1_00_00.0 lowercase_ = F.pad(SCREAMING_SNAKE_CASE_ , (0, self.additional_embeddings) , value=0.0 ) lowercase_ = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype ) lowercase_ = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 ) if self.norm_in is not None: lowercase_ = self.norm_in(SCREAMING_SNAKE_CASE_ ) for block in self.transformer_blocks: lowercase_ = block(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ) lowercase_ = self.norm_out(SCREAMING_SNAKE_CASE_ ) if self.prd_embedding is not None: lowercase_ = hidden_states[:, -1] else: lowercase_ = hidden_states[:, additional_embeddings_len:] lowercase_ = self.proj_to_clip_embeddings(SCREAMING_SNAKE_CASE_ ) if not return_dict: return (predicted_image_embedding,) return PriorTransformerOutput(predicted_image_embedding=SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : List[Any] ) -> Dict: lowercase_ = (prior_latents * self.clip_std) + self.clip_mean return prior_latents
30
import os import zipfile import pytest from datasets.utils.extract import ( BzipaExtractor, Extractor, GzipExtractor, LzaExtractor, SevenZipExtractor, TarExtractor, XzExtractor, ZipExtractor, ZstdExtractor, ) from .utils import require_lza, require_pyazr, require_zstandard @pytest.mark.parametrize( '''compression_format, is_archive''' , [ ('''7z''', True), ('''bz2''', False), ('''gzip''', False), ('''lz4''', False), ('''tar''', True), ('''xz''', False), ('''zip''', True), ('''zstd''', False), ] , ) def a ( snake_case__: int , snake_case__: Tuple , snake_case__: Dict , snake_case__: Dict , snake_case__: List[Any] , snake_case__: int , snake_case__: List[Any] , snake_case__: Optional[int] , snake_case__: str , snake_case__: Union[str, Any] , snake_case__: List[str] , snake_case__: int , ): '''simple docstring''' lowercase_ = { '''7z''': (seven_zip_file, SevenZipExtractor), '''bz2''': (bza_file, BzipaExtractor), '''gzip''': (gz_file, GzipExtractor), '''lz4''': (lza_file, LzaExtractor), '''tar''': (tar_file, TarExtractor), '''xz''': (xz_file, XzExtractor), '''zip''': (zip_file, ZipExtractor), '''zstd''': (zstd_file, ZstdExtractor), } lowercase_ , lowercase_ = input_paths_and_base_extractors[compression_format] if input_path is None: lowercase_ = F'''for \'{compression_format}\' compression_format, ''' if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(snake_case__ ) assert base_extractor.is_extractable(snake_case__ ) lowercase_ = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''') base_extractor.extract(snake_case__ , snake_case__ ) if is_archive: assert output_path.is_dir() for file_path in output_path.iterdir(): assert file_path.name == text_file.name lowercase_ = file_path.read_text(encoding='''utf-8''' ) else: lowercase_ = output_path.read_text(encoding='''utf-8''' ) lowercase_ = text_file.read_text(encoding='''utf-8''' ) assert extracted_file_content == expected_file_content @pytest.mark.parametrize( '''compression_format, is_archive''' , [ ('''7z''', True), ('''bz2''', False), ('''gzip''', False), ('''lz4''', False), ('''tar''', True), ('''xz''', False), ('''zip''', True), ('''zstd''', False), ] , ) def a ( snake_case__: List[Any] , snake_case__: int , snake_case__: Optional[int] , snake_case__: Union[str, Any] , snake_case__: List[Any] , snake_case__: Tuple , snake_case__: Optional[int] , snake_case__: List[str] , snake_case__: Union[str, Any] , snake_case__: Tuple , snake_case__: int , snake_case__: Optional[int] , ): '''simple docstring''' lowercase_ = { '''7z''': seven_zip_file, '''bz2''': bza_file, '''gzip''': gz_file, '''lz4''': lza_file, '''tar''': tar_file, '''xz''': xz_file, '''zip''': zip_file, '''zstd''': zstd_file, } lowercase_ = input_paths[compression_format] if input_path is None: lowercase_ = F'''for \'{compression_format}\' compression_format, ''' if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(snake_case__ ) lowercase_ = Extractor.infer_extractor_format(snake_case__ ) assert extractor_format is not None lowercase_ = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''') Extractor.extract(snake_case__ , snake_case__ , snake_case__ ) if is_archive: assert output_path.is_dir() 
for file_path in output_path.iterdir(): assert file_path.name == text_file.name lowercase_ = file_path.read_text(encoding='''utf-8''' ) else: lowercase_ = output_path.read_text(encoding='''utf-8''' ) lowercase_ = text_file.read_text(encoding='''utf-8''' ) assert extracted_file_content == expected_file_content @pytest.fixture def a ( snake_case__: Union[str, Any] , snake_case__: List[Any] ): '''simple docstring''' import tarfile lowercase_ = tmp_path / '''data_dot_dot''' directory.mkdir() lowercase_ = directory / '''tar_file_with_dot_dot.tar''' with tarfile.TarFile(snake_case__ , '''w''' ) as f: f.add(snake_case__ , arcname=os.path.join('''..''' , text_file.name ) ) return path @pytest.fixture def a ( snake_case__: int ): '''simple docstring''' import tarfile lowercase_ = tmp_path / '''data_sym_link''' directory.mkdir() lowercase_ = directory / '''tar_file_with_sym_link.tar''' os.symlink('''..''' , directory / '''subdir''' , target_is_directory=snake_case__ ) with tarfile.TarFile(snake_case__ , '''w''' ) as f: f.add(str(directory / '''subdir''' ) , arcname='''subdir''' ) # str required by os.readlink on Windows and Python < 3.8 return path @pytest.mark.parametrize( '''insecure_tar_file, error_log''' , [('''tar_file_with_dot_dot''', '''illegal path'''), ('''tar_file_with_sym_link''', '''Symlink''')] , ) def a ( snake_case__: List[Any] , snake_case__: Optional[int] , snake_case__: List[str] , snake_case__: List[str] , snake_case__: int , snake_case__: Optional[Any] ): '''simple docstring''' lowercase_ = { '''tar_file_with_dot_dot''': tar_file_with_dot_dot, '''tar_file_with_sym_link''': tar_file_with_sym_link, } lowercase_ = insecure_tar_files[insecure_tar_file] lowercase_ = tmp_path / '''extracted''' TarExtractor.extract(snake_case__ , snake_case__ ) assert caplog.text for record in caplog.records: assert record.levelname == "ERROR" assert error_log in record.msg def a ( snake_case__: Optional[int] ): '''simple docstring''' # We should have less false positives than zipfile.is_zipfile # We do that by checking only the magic number lowercase_ = tmpdir / '''not_a_zip_file''' # From: https://github.com/python/cpython/pull/5053 lowercase_ = ( B'''\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00''' B'''\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I''' B'''DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07''' B'''\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82''' ) with not_a_zip_file.open('''wb''' ) as f: f.write(snake_case__ ) assert zipfile.is_zipfile(str(snake_case__ ) ) # is a false positive for `zipfile` assert not ZipExtractor.is_extractable(snake_case__ ) # but we're right
30
1
import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available from . import BaseDiffusersCLICommand def a ( snake_case__: Optional[int] ): '''simple docstring''' return EnvironmentCommand() class lowercase__( UpperCAmelCase ): """simple docstring""" @staticmethod def _lowercase ( SCREAMING_SNAKE_CASE_ : ArgumentParser ) -> Tuple: lowercase_ = parser.add_parser('''env''' ) download_parser.set_defaults(func=SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Dict ) -> int: lowercase_ = huggingface_hub.__version__ lowercase_ = '''not installed''' lowercase_ = '''NA''' if is_torch_available(): import torch lowercase_ = torch.__version__ lowercase_ = torch.cuda.is_available() lowercase_ = '''not installed''' if is_transformers_available(): import transformers lowercase_ = transformers.__version__ lowercase_ = '''not installed''' if is_accelerate_available(): import accelerate lowercase_ = accelerate.__version__ lowercase_ = '''not installed''' if is_xformers_available(): import xformers lowercase_ = xformers.__version__ lowercase_ = { '''`diffusers` version''': version, '''Platform''': platform.platform(), '''Python version''': platform.python_version(), '''PyTorch version (GPU?)''': f'''{pt_version} ({pt_cuda_available})''', '''Huggingface_hub version''': hub_version, '''Transformers version''': transformers_version, '''Accelerate version''': accelerate_version, '''xFormers version''': xformers_version, '''Using GPU in script?''': '''<fill in>''', '''Using distributed or parallel set-up in script?''': '''<fill in>''', } print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' ) print(self.format_dict(SCREAMING_SNAKE_CASE_ ) ) return info @staticmethod def _lowercase ( SCREAMING_SNAKE_CASE_ : int ) -> Any: return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
30
from __future__ import annotations


def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Swap array[index1] and array[index2] if they are out of order for the given direction (1 = ascending, 0 = descending)."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merge the bitonic sequence array[low:low + length] into the given direction."""
    if length > 1:
        middle = length // 2
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sort array[low:low + length] (length must be a power of two) in the given direction."""
    if length > 1:
        middle = length // 2
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print("\nSorted array in ascending order is: ", end="")
    print(*unsorted, sep=", ")

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print("Sorted array in descending order is: ", end="")
    print(*unsorted, sep=", ")
30
1
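# Programmatic use of the bitonic helpers above (power-of-two input length required), avoiding the
# interactive input() path; the names follow the definitions bitonic_sort / bitonic_merge.
data = [12, 42, -21, 1, 23, -23, 45, 4]
bitonic_sort(data, 0, len(data), 1)   # ascending
print(data)
bitonic_merge(data, 0, len(data), 0)  # merge the sorted (hence bitonic) sequence descending
print(data)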