from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_v1 import MobileNetV1Config
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """A map of modules from TF to PyTorch."""
    tf_to_pt_map = {}

    if isinstance(model, MobileNetV1ForImageClassification):
        backbone = model.mobilenet_v1
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetV1ForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features, conv_layer):
    """Apply TensorFlow-style "SAME" padding to a convolution layer."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
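
# Example (editor's addition): a quick check of the "SAME"-padding helper above.
# Unlike PyTorch's symmetric `padding=`, TF "SAME" padding can be asymmetric: when
# the needed total padding is odd, the extra pixel goes to the bottom/right. The
# layer geometry and input size below are illustrative assumptions.
if __name__ == "__main__":
    demo_conv = nn.Conv2d(in_channels=3, out_channels=8, kernel_size=3, stride=2, padding=0)
    demo_features = torch.randn(1, 3, 8, 8)
    demo_padded = apply_tf_padding(demo_features, demo_conv)
    # Total padding of 1 per axis, applied as 0 pixels top/left and 1 bottom/right.
    print(demo_padded.shape)             # torch.Size([1, 3, 9, 9])
    print(demo_conv(demo_padded).shape)  # torch.Size([1, 8, 4, 4]) == ceil(8 / 2), matching TF "SAME"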
class MobileNetV1ConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetV1Config,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int = 1,
        groups: int = 1,
        bias: bool = False,
        use_normalization: bool = True,
        use_activation: Union[bool, str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features):
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetV1PreTrainedModel(PreTrainedModel):
    config_class = MobileNetV1Config
    load_tf_weights = load_tf_weights_in_mobilenet_v1
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module):
        """Initialize the weights."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
MOBILENET_V1_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1Model(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetV1ConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # 3x3 depthwise convolution
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )

            # 1x1 pointwise convolution
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_v1 = MobileNetV1Model(config)

        last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
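
# Example (editor's addition): minimal inference sketch for the classification head.
# The random tensor stands in for a real image, which would normally come from
# AutoImageProcessor; the checkpoint name matches _IMAGE_CLASS_CHECKPOINT above, and
# downloading its weights is assumed to be possible in this environment.
if __name__ == "__main__":
    classifier = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
    dummy_pixel_values = torch.randn(1, 3, 224, 224)
    with torch.no_grad():
        class_logits = classifier(dummy_pixel_values).logits
    print(classifier.config.id2label[class_logits.argmax(-1).item()])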
"""
Finds a real root of a function (its intersection with the x-axis) using the
secant method: https://en.wikipedia.org/wiki/Secant_method
"""
import math
from collections.abc import Callable


def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
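
# Example (editor's addition): sanity check against the known real root of
# x^3 - 2x - 5 = 0, which lies near 2.0946. The residual tolerance of 1e-3 is
# an assumption chosen to match the 1e-5 stopping criterion above.
if __name__ == "__main__":
    root = intersection(f, 3, 3.5)
    assert abs(root - 2.0946) < 1e-3
    assert abs(f(root)) < 1e-3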
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
    from ...feature_extraction_utils import FeatureExtractionMixin
    from ...tokenization_utils_base import PreTrainedTokenizerBase
    from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5,
7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7,
1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1,
4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6,
1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1,
1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9,
3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3,
8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7,
3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7,
7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3,
1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5,
2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5,
4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )

        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")
        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
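
# Example (editor's addition): instantiating the config with the defaults from the
# signature above. WhisperModel is the standard transformers class and is assumed
# to be importable in this environment.
if __name__ == "__main__":
    from transformers import WhisperModel

    config = WhisperConfig()
    model = WhisperModel(config)  # randomly initialized, not pretrained
    print(config.d_model, config.encoder_layers, config.num_mel_bins)  # 256 6 80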
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
    import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BICUBIC,
        do_center_crop=True,
        crop_size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_convert_rgb=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        do_convert_rgb=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
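
# Example (editor's addition): round-trip through the processor defaults.
# Constructing without a checkpoint is an assumption; in practice one would use
# CLIPImageProcessor.from_pretrained(...).
if __name__ == "__main__":
    import numpy as np

    processor = CLIPImageProcessor()
    dummy_image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
    batch = processor.preprocess(dummy_image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224)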
def solution(n: int = 600_851_475_143) -> int:
    """Returns the largest prime factor of n."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    max_number = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            max_number = i
            n //= i
        i += 1
    if n > 1:
        max_number = n
    return int(max_number)


if __name__ == "__main__":
    print(f"{solution() = }")
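
# Example (editor's addition): the Project Euler problem 3 statement notes that
# the prime factors of 13195 are 5, 7, 13 and 29, which gives a handy test case.
if __name__ == "__main__":
    assert solution(13195) == 29
    assert solution(17) == 17  # a prime is its own largest prime factor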
"""Greedy selection of items under a weight budget."""


class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
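
# Example (editor's addition): a small illustrative run; the menu below is made
# up, not part of the original module.
if __name__ == "__main__":
    foods = build_menu(["burger", "pizza", "salad"], [80, 100, 30], [40, 60, 10])
    chosen, total = greedy(foods, max_cost=100, key_func=Things.get_value)
    print(chosen, total)  # pizza (cost 60) then burger (cost 40) fit the budget -> total value 180.0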
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)
def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn


def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.

    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)


if __name__ == "__main__":
    args = parse_args()
    main(args)
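
# Example (editor's addition): reading the shards back. The feature spec mirrors
# what get_serialized_examples writes; the fixed sequence length is an assumption
# matching the default --max_length used when writing.
def load_tfrecords_example(filenames, max_length=512):
    feature_spec = {
        "input_ids": tf.io.FixedLenFeature([max_length], dtype=tf.int64),
        "attention_mask": tf.io.FixedLenFeature([max_length], dtype=tf.int64),
    }

    def decode_fn(serialized):
        return tf.io.parse_single_example(serialized, feature_spec)

    return tf.data.TFRecordDataset(filenames).map(decode_fn)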
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def add_arguments(parser):
    """Add arguments to parser for functions defined in quant_trainer."""
    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
    group.add_argument(
        "--recalibrate-weights",
        action="store_true",
        help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ),
    )


def set_default_quantizers(args):
    """Set default quantizers before creating the model."""
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)
def configure_model(model, args, calib=False, eval=False):
    """Function called before the training loop."""
    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)

    if args.clip_gelu:
        clip_gelu(model, args.clip_gelu)

    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model)
def enable_calibration(model):
    """Enable calibration of all *_quantizer modules in the model."""
    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")


def finish_calibration(model, args):
    """Disable calibration and load calibrated amax for all *_quantizer modules."""
    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model)
def fuse_qkv(model, args):
    """Use the same scale factor for Q, K and V by taking the max of their amaxes."""

    def fuse3(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()

        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)
def clip_gelu(model, maxval):
    """Clip activations generated by GELU to maxval by adjusting the amax of the following input quantizer."""
    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")


def expand_amax(model):
    """Expand per-tensor amax to be per channel, where each channel is assigned the per-tensor amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")
def recalibrate_weights(model):
    """Performs max calibration on the weights and updates amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod.weight_quantizer, "_amax"):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue

            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax
def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Print the quantization configuration of each weighted layer in the model."""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]

    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))

    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        s = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f'{" ":{name_width}} {wgt_str}')
def print_quant_summary(model):
    """Print summary of all quantizer modules in the model."""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")


def set_quantizer(name, mod, quantizer, k, v):
    """Set attribute k of the given quantizer submodule of mod to value v."""
    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")


def set_quantizers(name, mod, which="both", **kwargs):
    """Set quantizer attributes for mod."""
    s = f"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name, mod, "_input_quantizer", k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, "_weight_quantizer", k, v)
    logger.info(s)


def set_quantizer_by_name(model, names, **kwargs):
    """Set quantizer attributes for layers whose name matches a pattern in names."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s)
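
# Example (editor's addition): how these helpers are typically wired together for
# calibration. `build_model` and `calibration_batches` are placeholders supplied by
# the caller, not part of this module.
def calibrate_example(build_model, calibration_batches):
    import argparse

    parser = argparse.ArgumentParser()
    add_arguments(parser)
    args = parser.parse_args(["--calibrator", "max"])

    set_default_quantizers(args)  # must run before the model is constructed
    model = build_model()
    configure_model(model, args, calib=True)

    enable_calibration(model)
    with torch.no_grad():
        for batch in calibration_batches:
            model(**batch)
    finish_calibration(model, args)
    return model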
"""simple docstring"""
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model
class UMT5ModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")
    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
        return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config(self):
        return T5Config(
            vocab_size=166,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )
    def get_config(self):
        return T5Config(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )
    def create_and_check_model(
        self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels
    ):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)
    def create_and_check_decoder_model_past(
        self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels
    ):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_model_fp16_forward(
        self,
        config,
        input_dict,
    ):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMT5ModelTester(self)
    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMT5Model(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )
    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)
    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)
    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class UMTaModelIntegrationTests( unittest.TestCase ):
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
model = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' , return_dict=True ).to(torch_device )
tokenizer = AutoTokenizer.from_pretrained('google/umt5-small' , use_fast=False , legacy=False )
input_text = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
input_ids = tokenizer(input_text , return_tensors='pt' , padding=True ).input_ids
# fmt: off
EXPECTED_IDS = torch.tensor(
    [
        [ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55, 2_74, 1, 0, 0, 0, 0, 0, 0, 0, 0],
        [ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33, 6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96, 2_74, 1],
    ] )
# fmt: on
torch.testing.assert_allclose(input_ids , EXPECTED_IDS )
generated_ids = model.generate(input_ids.to(torch_device ) )
EXPECTED_FILLING = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
filling = tokenizer.batch_decode(generated_ids )
self.assertEqual(filling , EXPECTED_FILLING )
| 4 |
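The head-masking test above zeroes out every attention head and asserts that the returned attention weights sum to exactly zero. A minimal, self-contained sketch of that invariant; the layer and head counts here are made-up placeholders, not values from the model:

import torch

num_layers, num_heads = 2, 4                      # hypothetical sizes, for illustration only
head_mask = torch.zeros(num_layers, num_heads)    # a zero mask disables every head

# With all heads masked, each layer's attention weights come back all-zero,
# which is what the assertEqual(..., 0.0) in the test checks.
attentions = [head_mask[i].reshape(1, num_heads, 1, 1) * torch.rand(1, num_heads, 3, 3)
              for i in range(num_layers)]
assert sum(w.sum().item() for w in attentions) == 0.0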
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : List[Any] = 0
@slow
def _a (self ):
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    self.assertIsNotNone(tokenizer )
    self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
    self.assertGreater(len(tokenizer ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    self.assertIsNotNone(tokenizer )
    self.assertIsInstance(tokenizer , (GPTaTokenizer, GPTaTokenizerFast) )
    self.assertGreater(len(tokenizer ) , 0 )
def _a (self ):
tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER )
self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def _a (self ):
tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER )
self.assertIsInstance(tokenizer , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def _a (self ):
config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER )
self.assertIsInstance(config , RobertaConfig )
# Check that tokenizer_type ≠ model_type
tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER , config=config )
self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def _a (self ):
with tempfile.TemporaryDirectory() as tmp_dir:
    shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(tmp_dir , """vocab.txt""" ) )
    tokenizer = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type="""bert""" , use_fast=False )
    self.assertIsInstance(tokenizer , BertTokenizer )
with tempfile.TemporaryDirectory() as tmp_dir:
    shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(tmp_dir , """vocab.json""" ) )
    shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(tmp_dir , """merges.txt""" ) )
    tokenizer = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type="""gpt2""" , use_fast=False )
    self.assertIsInstance(tokenizer , GPTaTokenizer )
@require_tokenizers
def _a (self ):
with tempfile.TemporaryDirectory() as tmp_dir:
    shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(tmp_dir , """vocab.txt""" ) )
    tokenizer = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type="""bert""" )
    self.assertIsInstance(tokenizer , BertTokenizerFast )
with tempfile.TemporaryDirectory() as tmp_dir:
    shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(tmp_dir , """vocab.json""" ) )
    shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(tmp_dir , """merges.txt""" ) )
    tokenizer = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type="""gpt2""" )
    self.assertIsInstance(tokenizer , GPTaTokenizerFast )
def _a (self ):
with pytest.raises(ValueError ):
    AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" )
@require_tokenizers
def _a (self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
    tokenizer = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" )
    self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
    if isinstance(tokenizer , BertTokenizer ):
        self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , False )
    else:
        self.assertEqual(tokenizer.do_lower_case , False )
    self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def _a (self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
    with self.assertRaisesRegex(
        EnvironmentError , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ):
        tokenizer = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" )
def _a (self ):
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
tokenizers = TOKENIZER_MAPPING.values()
tokenizer_names = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(tokenizer_name )
@require_tokenizers
def _a (self ):
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=False ) , BertTokenizer )
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , BertTokenizerFast )
@require_tokenizers
def _a (self ):
tokenizer = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=False )
text = """Hello, world. How are you?"""
tokens = tokenizer.tokenize(text )
self.assertEqual("""[UNK]""" , tokens[0] )
tokenizer = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=False )
tokens = tokenizer.tokenize(text )
self.assertEqual("""[UNK]""" , tokens[0] )
@require_tokenizers
def _a (self ):
tokenizer = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" )
self.assertEqual(type(tokenizer ) , PreTrainedTokenizerFast )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 30000 )
self.assertEqual(tokenizer.unk_token , """[UNK]""" )
self.assertEqual(tokenizer.padding_side , """right""" )
self.assertEqual(tokenizer.truncation_side , """right""" )
def _a (self ):
tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER )
self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
    tokenizer.save_pretrained(tmp_dir )
    tokenizera = AutoTokenizer.from_pretrained(tmp_dir )
self.assertIsInstance(tokenizera , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def _a (self ):
tokenizer = AutoTokenizer.from_pretrained("""ctrl""" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(tokenizer , CTRLTokenizer )
def _a (self ):
# Check we can load the tokenizer config of an online model.
config = get_tokenizer_config("""bert-base-cased""" )
_ = config.pop("""_commit_hash""" , None )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(config , {"""do_lower_case""": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER )
self.assertDictEqual(config , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER )
with tempfile.TemporaryDirectory() as tmp_dir:
    tokenizer.save_pretrained(tmp_dir )
    config = get_tokenizer_config(tmp_dir )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" )
def _a (self ):
try:
AutoConfig.register("""custom""" , CustomConfig )
AutoTokenizer.register(CustomConfig , slow_tokenizer_class=CustomTokenizer )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(ValueError ):
    AutoTokenizer.register(BertConfig , slow_tokenizer_class=BertTokenizer )
tokenizer = CustomTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER )
with tempfile.TemporaryDirectory() as tmp_dir:
    tokenizer.save_pretrained(tmp_dir )
    new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir )
    self.assertIsInstance(new_tokenizer , CustomTokenizer )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def _a (self ):
try:
AutoConfig.register("""custom""" , CustomConfig )
# Can register in two steps
AutoTokenizer.register(CustomConfig , slow_tokenizer_class=CustomTokenizer )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(CustomConfig , fast_tokenizer_class=CustomTokenizerFast )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
    CustomConfig , slow_tokenizer_class=CustomTokenizer , fast_tokenizer_class=CustomTokenizerFast )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(ValueError ):
    AutoTokenizer.register(BertConfig , fast_tokenizer_class=BertTokenizerFast )
# We pass through a bert tokenizer fast because there is no slow-to-fast converter for our new tokenizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
    bert_tokenizer = BertTokenizerFast.from_pretrained(SMALL_MODEL_IDENTIFIER )
    bert_tokenizer.save_pretrained(tmp_dir )
    tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir )
with tempfile.TemporaryDirectory() as tmp_dir:
    tokenizer.save_pretrained(tmp_dir )
    new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir )
    self.assertIsInstance(new_tokenizer , CustomTokenizerFast )
    new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir , use_fast=False )
    self.assertIsInstance(new_tokenizer , CustomTokenizer )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _a (self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(ValueError ):
    tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(ValueError ):
    tokenizer = AutoTokenizer.from_pretrained(
        """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=False )
tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=True )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
    tokenizer.save_pretrained(tmp_dir )
    reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir , trust_remote_code=True )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
tokenizer = AutoTokenizer.from_pretrained(
    """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=True , use_fast=False )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
    tokenizer.save_pretrained(tmp_dir )
    reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir , trust_remote_code=True , use_fast=False )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
@require_tokenizers
def _a (self ):
class NewTokenizer( BertTokenizer ):
    special_attribute_present = False
class NewTokenizerFast( BertTokenizerFast ):
    slow_tokenizer_class = NewTokenizer
    special_attribute_present = False
try:
AutoConfig.register("""custom""" , CustomConfig )
AutoTokenizer.register(CustomConfig , slow_tokenizer_class=NewTokenizer )
AutoTokenizer.register(CustomConfig , fast_tokenizer_class=NewTokenizerFast )
# If remote code is not set, the default is to use local
tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=False )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
tokenizer = AutoTokenizer.from_pretrained(
    """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=False )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
tokenizer = AutoTokenizer.from_pretrained(
    """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=False , use_fast=False )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
tokenizer = AutoTokenizer.from_pretrained(
    """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=True )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertTrue(tokenizer.special_attribute_present )
tokenizer = AutoTokenizer.from_pretrained(
    """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=True , use_fast=False )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _a (self ):
tokenizer = AutoTokenizer.from_pretrained(
    """hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=True )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
tokenizer = AutoTokenizer.from_pretrained(
    """hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=True , use_fast=False )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def _a (self ):
with self.assertRaisesRegex(
    EnvironmentError , """bert-base is not a local folder and is not a valid model identifier""" ):
    tokenizer = AutoTokenizer.from_pretrained("""bert-base""" )
def _a (self ):
with self.assertRaisesRegex(
    EnvironmentError , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
    tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision="""aaaaaa""" )
def _a (self ):
# Make sure we have cached the tokenizer.
_ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
    _ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 ) | 667 | 0 |
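The registration tests above follow one pattern: bind a config class to a tokenizer class, then round-trip through the Auto classes. A condensed sketch of that flow; `MyConfig` and `MyTokenizer` are invented stand-ins, not classes from the test module, and registration is process-global (the tests undo it via the private `_extra_content` mappings):

from transformers import AutoConfig, AutoTokenizer, BertTokenizer, PretrainedConfig

class MyConfig(PretrainedConfig):      # hypothetical custom config
    model_type = "my-model"

class MyTokenizer(BertTokenizer):      # hypothetical custom slow tokenizer
    pass

AutoConfig.register("my-model", MyConfig)
AutoTokenizer.register(MyConfig, slow_tokenizer_class=MyTokenizer)
# From here, AutoTokenizer.from_pretrained on a checkpoint whose config is
# MyConfig resolves to MyTokenizer, exactly as the tests above assert.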
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_lowercase = logging.get_logger(__name__)
# TODO: upload to AWS
_lowercase = {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"""
),
}
class UpperCAmelCase_ ( PretrainedConfig ):
'''simple docstring'''
model_type = '''retribert'''
def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=8 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , share_encoders=True , projection_dim=128 , pad_token_id=0 , **kwargs , ):
    """simple docstring"""
    super().__init__(pad_token_id=pad_token_id , **kwargs )
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = hidden_act
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = type_vocab_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = share_encoders
_lowerCAmelCase = projection_dim
| 5 |
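A short usage sketch for the config class above. It behaves like any PretrainedConfig subclass and can be serialized and reloaded; note that the obfuscated body binds locals rather than `self` attributes, so only values the base class handles survive the round-trip. The temporary directory is just a placeholder:

import tempfile

config = UpperCAmelCase_(vocab_size=30_522, projection_dim=128)
with tempfile.TemporaryDirectory() as tmp_dir:
    config.save_pretrained(tmp_dir)                       # writes config.json
    reloaded = UpperCAmelCase_.from_pretrained(tmp_dir)   # reads it back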
'''simple docstring'''
from __future__ import annotations
def a ( number_of_bytes , partitions ):
    '''simple docstring'''
    if partitions <= 0:
        raise ValueError("""partitions must be a positive number!""" )
    if partitions > number_of_bytes:
        raise ValueError("""partitions can not be greater than number_of_bytes!""" )
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions ):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f'{start_bytes}-{end_bytes}' )
    return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod() | 667 | 0 |
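A quick usage example for the allocation helper above (which keeps its obfuscated name `a`): splitting 100 bytes across 4 partitions yields contiguous, inclusive ranges, with the last range absorbing any remainder.

# 100 bytes over 4 partitions -> four contiguous, inclusive 25-byte ranges.
print(a(100, 4))   # ['1-25', '26-50', '51-75', '76-100']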
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset( IterableDataset ):
    def __init__( self :Tuple , data :Any ) -> None:
        """simple docstring"""
        self.data = data
    def __iter__( self :Union[str, Any] ):
        """simple docstring"""
        for element in self.data:
            yield element
def create_accelerator( even_batches: bool = True ):
    accelerator = Accelerator(even_batches=even_batches )
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator
def create_dataloader( accelerator: Accelerator , dataset_size: int , batch_size: int , iterable: bool = False ):
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size ) ) )
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size ) ) )
    dl = DataLoader(dataset , batch_size=batch_size )
    dl = accelerator.prepare(dl )
    return dl
def verify_dataloader_batch_sizes( accelerator: Accelerator , dataset_size: int , batch_size: int , process_0_expected_batch_sizes: List[int] , process_1_expected_batch_sizes: List[int] , ):
    dl = create_dataloader(accelerator=accelerator , dataset_size=dataset_size , batch_size=batch_size )
    batch_sizes = [len(batch[0] ) for batch in dl]
    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes
def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()
    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False )
    verify_dataloader_batch_sizes(
        accelerator , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
    verify_dataloader_batch_sizes(
        accelerator , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False )
    model = torch.nn.Linear(1 , 1 )
    ddp_model = accelerator.prepare(model )
    dl = create_dataloader(accelerator , dataset_size=3 , batch_size=1 )
    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model] ):
        for batch_idx, batch in enumerate(dl ):
            output = ddp_model(batch[0].float() )
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx )
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]
def test_join_raises_warning_for_non_ddp_distributed( accelerator: Accelerator ):
    with warnings.catch_warnings(record=True ) as w:
        with accelerator.join_uneven_inputs([Mock()] ):
            pass
        assert issubclass(w[-1].category , UserWarning )
        assert "only supported for multi-GPU" in str(w[-1].message )
def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches )
    model = torch.nn.Linear(1 , 1 )
    ddp_model = accelerator.prepare(model )
    train_dl = create_dataloader(accelerator , dataset_size=3 , batch_size=1 )
    valid_dl = create_dataloader(accelerator , dataset_size=3 , batch_size=1 )
    with accelerator.join_uneven_inputs([ddp_model] , even_batches=overridden_even_batches ):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches
    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches
def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches )
    model = torch.nn.Linear(1 , 1 )
    ddp_model = accelerator.prepare(model )
    create_dataloader(accelerator , dataset_size=3 , batch_size=1 , iterable=True )
    batch_dl = create_dataloader(accelerator , dataset_size=3 , batch_size=1 )
    with warnings.catch_warnings():
        warnings.filterwarnings("""ignore""" )
        try:
            with accelerator.join_uneven_inputs([ddp_model] , even_batches=overridden_even_batches ):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError
    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches
def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1 , 1 )
    ddp_model = accelerator.prepare(model )
    create_dataloader(accelerator , dataset_size=3 , batch_size=1 , iterable=True )
    with warnings.catch_warnings(record=True ) as w:
        with accelerator.join_uneven_inputs([ddp_model] , even_batches=False ):
            pass
        assert issubclass(w[-1].category , UserWarning )
        assert "only supported for map-style datasets" in str(w[-1].message )
def main():
    accelerator = create_accelerator()
    accelerator.print("""Test that even_batches variable ensures uniform batches across processes""" )
    test_default_ensures_even_batch_sizes()
    accelerator.print("""Run tests with even_batches disabled""" )
    test_can_disable_even_batches()
    accelerator.print("""Test joining uneven inputs""" )
    test_can_join_uneven_inputs()
    accelerator.print("""Test overriding even_batches when joining uneven inputs""" )
    test_join_can_override_even_batches()
    accelerator.print("""Test overriding even_batches for mixed dataloader types""" )
    test_join_can_override_for_mixed_type_dataloaders()
    accelerator.print("""Test overriding even_batches raises a warning for iterable dataloaders""" )
    test_join_raises_warning_for_iterable_when_overriding_even_batches()
    accelerator.print("""Test join with non DDP distributed raises warning""" )
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator )
    accelerator.state.distributed_type = original_state
if __name__ == "__main__":
main() | 6 |
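A small, framework-free sketch of what the `even_batches` option above does: with two processes and a dataset that does not divide evenly, the sampler pads by reusing samples so both ranks see the same number of batches. The numbers mirror the dataset_size=3, batch_size=1 assertions in the script:

# dataset of 3 samples, batch_size=1, world_size=2
samples = list(range(3))
world_size = 2
# even_batches=True: pad by repeating samples until divisible by world_size
padded = samples + samples[: -len(samples) % world_size]
shards = [padded[rank::world_size] for rank in range(world_size)]
assert [len(s) for s in shards] == [2, 2]   # both ranks see two batches
# even_batches=False: no padding, rank 1 gets one fewer batch
shards = [samples[rank::world_size] for rank in range(world_size)]
assert [len(s) for s in shards] == [2, 1]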
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase :Any = logging.get_logger(__name__)
def create_rename_keys( encoder_config , decoder_config ):
'''simple docstring'''
A_ : Dict = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm1.weight', f'encoder.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm1.bias', f'encoder.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.weight', f'encoder.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.bias', f'encoder.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm2.weight', f'encoder.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm2.bias', f'encoder.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.weight', f'encoder.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.bias', f'encoder.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc2.weight', f'encoder.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.mlp.fc2.bias', f'encoder.encoder.layer.{i}.output.dense.bias') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def read_in_q_k_v( state_dict , encoder_config ):
'''simple docstring'''
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
A_ : Optional[int] = state_dict.pop(f'encoder.deit.blocks.{i}.attn.qkv.weight' )
A_ : Union[str, Any] = in_proj_weight[
: encoder_config.hidden_size, :
]
A_ : str = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
A_ : Union[str, Any] = in_proj_weight[
-encoder_config.hidden_size :, :
]
def rename_key( dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def prepare_img( checkpoint_url ):
    '''simple docstring'''
    if "handwritten" in checkpoint_url:
        url = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg""" # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw ).convert("""RGB""" )
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint( checkpoint_url , pytorch_dump_folder_path ):
    '''simple docstring'''
    encoder_config = ViTConfig(image_size=3_84 , qkv_bias=False )
    decoder_config = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
A_ : List[str] = 7_68
elif "large" in checkpoint_url:
# use ViT-large encoder
A_ : Union[str, Any] = 10_24
A_ : List[Any] = 40_96
A_ : Dict = 24
A_ : List[str] = 16
A_ : Union[str, Any] = 10_24
else:
raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
    decoder_config.tie_word_embeddings = False
    decoder_config.activation_function = """relu"""
    decoder_config.max_position_embeddings = 10_24
    decoder_config.scale_embedding = True
    decoder_config.use_learned_position_embeddings = False
    decoder_config.layernorm_embedding = False
# load HuggingFace model
encoder = ViTModel(encoder_config , add_pooling_layer=False )
decoder = TrOCRForCausalLM(decoder_config )
model = VisionEncoderDecoderModel(encoder=encoder , decoder=decoder )
model.eval()
# load state_dict of original model, rename some keys
state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" , check_hash=True )["""model"""]
rename_keys = create_rename_keys(encoder_config , decoder_config )
for src, dest in rename_keys:
    rename_key(state_dict , src , dest )
read_in_q_k_v(state_dict , encoder_config )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
    val = state_dict.pop(key )
    if key.startswith("""decoder""" ) and "output_projection" not in key:
        state_dict["""decoder.model.""" + key] = val
    else:
        state_dict[key] = val
# load state dict
model.load_state_dict(state_dict )
# Check outputs on an image
image_processor = ViTImageProcessor(size=encoder_config.image_size )
tokenizer = RobertaTokenizer.from_pretrained("""roberta-large""" )
processor = TrOCRProcessor(image_processor , tokenizer )
pixel_values = processor(images=prepare_img(checkpoint_url ) , return_tensors="""pt""" ).pixel_values
# verify logits
decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
outputs = model(pixel_values=pixel_values , decoder_input_ids=decoder_input_ids )
logits = outputs.logits
expected_shape = torch.Size([1, 1, 5_02_65] )
if "trocr-base-handwritten" in checkpoint_url:
expected_slice = torch.tensor(
[-1.4_502, -4.6_683, -0.5_347, -2.9_291, 9.1_435, -3.0_571, 8.9_764, 1.7_560, 8.7_358, -1.5_311] )
elif "trocr-large-handwritten" in checkpoint_url:
expected_slice = torch.tensor(
[-2.6_437, -1.3_129, -2.2_596, -5.3_455, 6.3_539, 1.7_604, 5.4_991, 1.4_702, 5.6_113, 2.0_170] )
elif "trocr-base-printed" in checkpoint_url:
expected_slice = torch.tensor(
[-5.6_816, -5.8_388, 1.1_398, -6.9_034, 6.8_505, -2.4_393, 1.2_284, -1.0_232, -1.9_661, -3.9_210] )
elif "trocr-large-printed" in checkpoint_url:
expected_slice = torch.tensor(
[-6.0_162, -7.0_959, 4.4_155, -5.1_063, 7.0_468, -3.1_631, 2.6_466, -0.3_081, -0.8_106, -1.7_535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , expected_slice , atol=1E-3 ), "First elements of logits not as expected"
Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(pytorch_dump_folder_path )
print(f'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowerCamelCase :Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
lowerCamelCase :Optional[int] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 667 | 0 |
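The conversion script above is mostly mechanical key renaming over a state dict. A minimal sketch of the pop-and-reinsert pattern it uses; the keys and value here are toys chosen to echo the rename table, not a real checkpoint:

# Toy state dict illustrating the rename_key pattern used above.
state_dict = {"encoder.deit.norm.weight": 1.0, "encoder.deit.norm.bias": 0.0}
rename_keys = [
    ("encoder.deit.norm.weight", "encoder.layernorm.weight"),
    ("encoder.deit.norm.bias", "encoder.layernorm.bias"),
]
for src, dest in rename_keys:
    state_dict[dest] = state_dict.pop(src)
assert "encoder.layernorm.weight" in state_dict
assert "encoder.deit.norm.weight" not in state_dict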
"""simple docstring"""
import math
import qiskit
def quantum_full_adder( input_a : int = 1 , input_b : int = 1 , carry_in : int = 1 ) -> qiskit.result.counts.Counts:
    '''simple docstring'''
    if (
        isinstance(input_a , str )
        or isinstance(input_b , str )
        or isinstance(carry_in , str )
    ):
        raise TypeError('inputs must be integers.' )
    if (input_a < 0) or (input_b < 0) or (carry_in < 0):
        raise ValueError('inputs must be positive.' )
    if (
        (math.floor(input_a ) != input_a)
        or (math.floor(input_b ) != input_b)
        or (math.floor(carry_in ) != carry_in)
    ):
        raise ValueError('inputs must be exact integers.' )
    if (input_a > 2) or (input_b > 2) or (carry_in > 2):
        raise ValueError('inputs must be less or equal to 2.' )
    # build registers
    qr = qiskit.QuantumRegister(4 , 'qr' )
    cr = qiskit.ClassicalRegister(2 , 'cr' )
    # list the entries
    entry = [input_a, input_b, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr , cr )
    for i in range(0 , 3 ):
        if entry[i] == 2:
            quantum_circuit.h(i ) # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i ) # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i ) # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.ccx(1 , 2 , 3 )
    quantum_circuit.cx(1 , 2 )
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.measure([2, 3] , cr ) # measure the last two qbits
    backend = qiskit.Aer.get_backend('aer_simulator' )
    job = qiskit.execute(quantum_circuit , backend , shots=10_00 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(F'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''')
| 7 |
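A usage sketch for the adder above, assuming qiskit with the Aer simulator is installed: with all three inputs set to 1, the classical bits read carry-out 1 and sum 1, so every shot should land on '11'.

# 1 + 1 + 1 = 0b11 -> deterministic inputs, so all shots should measure '11'.
counts = quantum_full_adder(1, 1, 1)
assert counts.get("11", 0) == 10_00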
'''simple docstring'''
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))''')) | 667 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase__ : int = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Dict = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : int = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : List[Any] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Optional[int] = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Optional[int] = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
lowercase__ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure) | 8 |
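Every branch above only fills in `_import_structure`; the heavy imports are deferred until first attribute access by `_LazyModule`. A stripped-down sketch of that pattern using `importlib` (generic names, not the transformers implementation):

import importlib
import types

class LazyModule(types.ModuleType):
    """Resolve attributes to submodule imports on first access."""
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {submodule: [attr, ...]} into {attr: submodule}
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)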
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
lowerCamelCase :List[str] = imread(R'''digital_image_processing/image_data/lena_small.jpg''')
lowerCamelCase :Optional[int] = cvtColor(img, COLOR_BGR2GRAY)
def a ( ):
'''simple docstring'''
negative_img = cn.convert_to_negative(img )
# assert negative_img array for at least one True
assert negative_img.any()
def a ( ):
'''simple docstring'''
with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img:
# Work around assertion for response
assert str(cc.change_contrast(img , 1_10 ) ).startswith(
"""<PIL.Image.Image image mode=RGB size=100x100 at""" )
def a ( ):
'''simple docstring'''
resp = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def a ( ):
'''simple docstring'''
canny_img = imread("""digital_image_processing/image_data/lena_small.jpg""" , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
canny_array = canny.canny(canny_img )
# assert canny array for at least one True
assert canny_array.any()
def a ( ):
'''simple docstring'''
assert gg.gaussian_filter(gray , 5 , sigma=0.9 ).all()
def a ( ):
'''simple docstring'''
laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
res = conv.img_convolve(gray , laplace ).astype(uint8 )
assert res.any()
def a ( ):
'''simple docstring'''
assert med.median_filter(gray , 3 ).any()
def a ( ):
'''simple docstring'''
grad, theta = sob.sobel_filter(gray )
assert grad.any() and theta.any()
def a ( ):
'''simple docstring'''
sepia = sp.make_sepia(img , 20 )
assert sepia.all()
def a ( lowerCamelCase__ = "digital_image_processing/image_data/lena_small.jpg" ):
'''simple docstring'''
burkes = bs.Burkes(imread(lowerCamelCase__ , 1 ) , 1_20 )
burkes.process()
assert burkes.output_img.any()
def a ( lowerCamelCase__ = "digital_image_processing/image_data/lena_small.jpg" , ):
'''simple docstring'''
nn = rs.NearestNeighbour(imread(lowerCamelCase__ , 1 ) , 4_00 , 2_00 )
nn.process()
assert nn.output.any()
def a ( ):
'''simple docstring'''
file_path = """digital_image_processing/image_data/lena.jpg"""
# Reading the image and converting it to grayscale.
image = imread(file_path , 0 )
# Test for get_neighbors_pixel function() return not None
x_coordinate = 0
y_coordinate = 0
center_pixel = image[x_coordinate][y_coordinate]
neighbors_pixels = lbp.get_neighbors_pixel(
    image , x_coordinate , y_coordinate , center_pixel )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
lbp_image = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
    for j in range(0 , image.shape[1] ):
        lbp_image[i][j] = lbp.local_binary_value(image , i , j )
assert lbp_image.any() | 667 | 0 |
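A tiny worked example of the local-binary-pattern idea the last test exercises: threshold the 8 neighbors against the center pixel and pack the resulting bits into one value. The weights and clockwise ordering here are one common convention, not necessarily the one the library uses:

import numpy as np

patch = np.array([[6, 5, 2],
                  [7, 6, 1],
                  [9, 8, 7]])
center = patch[1, 1]
# clockwise from the top-left neighbor; one common bit ordering
neighbors = [patch[0, 0], patch[0, 1], patch[0, 2], patch[1, 2],
             patch[2, 2], patch[2, 1], patch[2, 0], patch[1, 0]]
bits = [int(p >= center) for p in neighbors]
lbp_value = sum(bit << i for i, bit in enumerate(bits))
assert 0 <= lbp_value < 256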
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class __lowerCAmelCase ( PretrainedConfig ):
"""simple docstring"""
    model_type = "vit_msn"
    def __init__( self : Tuple , hidden_size : int=7_68 , num_hidden_layers : int=12 , num_attention_heads : int=12 , intermediate_size : int=30_72 , hidden_act : str="gelu" , hidden_dropout_prob : float=0.0 , attention_probs_dropout_prob : float=0.0 , initializer_range : float=0.02 , layer_norm_eps : float=1E-06 , image_size : int=2_24 , patch_size : int=16 , num_channels : int=3 , qkv_bias : bool=True , **kwargs : Dict , ):
        """simple docstring"""
        super().__init__(**kwargs )
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = initializer_range
A__ = layer_norm_eps
A__ = image_size
A__ = patch_size
A__ = num_channels
A__ = qkv_bias
| 9 |
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
lowerCamelCase :Dict = get_logger(__name__)
class _PatchedModuleObj :
    """Stand-in object mirroring a module's attributes so they can be patched."""
    def __init__(self , module , attrs=None ):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("""__""" ):
                    setattr(self , key , getattr(module , key ) )
        self._original_module = module._original_module if isinstance(module , _PatchedModuleObj ) else module
class patch_submodule :
    """Temporarily (or permanently) replace an attribute reachable from `obj`."""
    _active_patches = []
    def __init__(self , obj , target , new , attrs=None ):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(""".""" )[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__(self ):
        *submodules, target_attr = self.target.split(""".""" )
        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules ) ):
            try:
                submodule = import_module(""".""".join(submodules[: i + 1] ) )
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj , attr )
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr , _PatchedModuleObj ) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj , attr , _PatchedModuleObj(obj_attr , attrs=self.attrs ) )
                    patched = getattr(self.obj , attr )
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched , key , _PatchedModuleObj(getattr(patched , key , None ) , attrs=self.attrs ) )
                        patched = getattr(patched , key )
                    # finally set the target attribute
                    setattr(patched , target_attr , self.new )
        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules: # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(""".""".join(submodules ) ) , target_attr )
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj , attr ) is attr_value:
                    self.original[attr] = getattr(self.obj , attr )
                    setattr(self.obj , attr , self.new )
        elif target_attr in globals()["""__builtins__"""]: # if it's a builtin like "open"
            self.original[target_attr] = globals()["""__builtins__"""][target_attr]
            setattr(self.obj , target_attr , self.new )
        else:
            raise RuntimeError(F'Tried to patch attribute {target_attr} instead of a submodule.' )
    def __exit__(self , *exc_info ):
        for attr in list(self.original ):
            setattr(self.obj , attr , self.original.pop(attr ) )
def start(self ):
self.__enter__()
self._active_patches.append(self )
def stop(self ):
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__() | 667 | 0 |
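A usage sketch for the patcher above (restored here under the datasets-style name `patch_submodule`): temporarily swap `os.path.join` as seen from a module. The module object is a stand-in invented for the example:

import os
import types

mod = types.ModuleType("mod")   # stand-in for a module whose globals we patch
mod.os = os                     # as if the module had done `import os`

def fake_join(*parts):
    return "|".join(parts)

with patch_submodule(mod, "os.path.join", fake_join):
    assert mod.os.path.join("a", "b") == "a|b"    # patched inside the block
assert mod.os.path.join("a", "b") == os.path.join("a", "b")  # restored after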
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("--user", type=str, default="ubuntu")
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--key_path", type=str, default=None)
parser.add_argument("--instance", type=str, default="V100:1")
parser.add_argument("--provider", type=str, default="cheapest")
parser.add_argument("--use_spot", type=bool, default=False)
parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
_lowerCAmelCase, _lowerCAmelCase = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("Cannot specify both BYO and on-demand cluster args")
_lowerCAmelCase = rh.cluster(
name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
)
else:
_lowerCAmelCase = rh.cluster(
name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
_lowerCAmelCase = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(["pip:./"]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f'pip install -r transformers/examples/{example_dir}/requirements.txt'])
cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f'python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 10 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCamelCase :int = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Optional[int] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :List[Any] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Any = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
lowerCamelCase :Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 667 | 0 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
lowercase_ = logging.get_logger(__name__)
class SchedulerType( Enum ):
    '''simple docstring'''
    LINEAR = 'linear'
    COSINE = 'cosine'
    COSINE_WITH_RESTARTS = 'cosine_with_restarts'
    POLYNOMIAL = 'polynomial'
    CONSTANT = 'constant'
    CONSTANT_WITH_WARMUP = 'constant_with_warmup'
    PIECEWISE_CONSTANT = 'piecewise_constant'
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """simple docstring"""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """simple docstring"""
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0
    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """simple docstring"""
    rules_dict = {}
    rule_list = step_rules.split(''',''')
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(''':''')
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])
    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple
        return rule_func
    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
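# Worked example for the rule string parsed above (format read off the parser:
# "multiplier:step_boundary,...,final_multiplier"). A standalone re-implementation,
# kept here as a sketch rather than a second source of truth:
def _parse_step_rules(step_rules: str):
    rules, parts = {}, step_rules.split(",")
    for part in parts[:-1]:
        value_str, boundary_str = part.split(":")
        rules[int(boundary_str)] = float(value_str)
    return rules, float(parts[-1])
# LR multiplier is 1.0 for steps < 10, 0.1 for 10 <= steps < 20, then 0.005 afterwards.
assert _parse_step_rules("1:10,0.1:20,0.005") == ({10: 1.0, 20: 0.1}, 0.005)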
def get_linear_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, last_epoch: int = -1):
    """simple docstring"""
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)))
    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1):
    """simple docstring"""
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))
    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1):
    """simple docstring"""
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))
    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, lr_end: float = 1e-7, power: float = 1.0, last_epoch: int = -1):
    """simple docstring"""
    lr_init = optimizer.defaults['''lr''']
    if not (lr_init > lr_end):
        raise ValueError(F'''lr_end ({lr_end}) must be smaller than initial lr ({lr_init})''')
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init
    return LambdaLR(optimizer, lr_lambda, last_epoch)
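# Numeric sanity check for the polynomial schedule above, re-derived standalone with
# illustrative values (warmup=10, total=100, lr_init=1e-3, lr_end=1e-7, power=1.0):
def _poly_multiplier(step, warmup=10, total=100, lr_init=1e-3, lr_end=1e-7, power=1.0):
    if step < warmup:
        return step / max(1, warmup)
    if step > total:
        return lr_end / lr_init
    remaining = 1 - (step - warmup) / (total - warmup)
    return ((lr_init - lr_end) * remaining**power + lr_end) / lr_init
assert _poly_multiplier(0) == 0.0                    # warmup ramps from zero
assert _poly_multiplier(10) == 1.0                   # full LR once warmup ends
assert abs(_poly_multiplier(100) - 1e-4) < 1e-12     # decays to lr_end / lr_init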
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(name: Union[str, SchedulerType], optimizer: Optimizer, step_rules: Optional[str] = None, num_warmup_steps: Optional[int] = None, num_training_steps: Optional[int] = None, num_cycles: int = 1, power: float = 1.0, last_epoch: int = -1, ):
    """simple docstring"""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(F'''{name} requires `num_warmup_steps`, please provide that argument.''')
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(F'''{name} requires `num_training_steps`, please provide that argument.''')
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles, last_epoch=last_epoch, )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, power=power, last_epoch=last_epoch, )
    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch)
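# Usage sketch for the dispatcher above (assumes torch is installed; any string
# matching a SchedulerType value works as `name`):
import torch
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
lr_scheduler = get_scheduler("cosine", optimizer, num_warmup_steps=100, num_training_steps=1000)
for _ in range(3):          # in a real training loop this follows each optimizer.step()
    optimizer.step()
    lr_scheduler.step()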
| 11 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , lowercase , lowercase , lowercase , lowercase = None , ):
super().__init__()
self.register_modules(transformer=lowercase , vae=lowercase , scheduler=lowercase )
        # create an ImageNet label -> id dictionary for easier use
A_ : str = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(""",""" ):
A_ : Optional[Any] = int(lowercase )
A_ : List[Any] = dict(sorted(self.labels.items() ) )
def _a (self , lowercase ):
if not isinstance(lowercase , lowercase ):
A_ : Optional[int] = list(lowercase )
for l in label:
if l not in self.labels:
raise ValueError(
F'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.' )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__(self , lowercase , lowercase = 4.0 , lowercase = None , lowercase = 50 , lowercase = "pil" , lowercase = True , ):
A_ : Tuple = len(lowercase )
A_ : Optional[Any] = self.transformer.config.sample_size
A_ : int = self.transformer.config.in_channels
A_ : Optional[int] = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowercase , device=self.device , dtype=self.transformer.dtype , )
A_ : Optional[Any] = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
A_ : Optional[int] = torch.tensor(lowercase , device=self.device ).reshape(-1 )
A_ : Optional[int] = torch.tensor([1000] * batch_size , device=self.device )
A_ : Optional[Any] = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(lowercase )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
A_ : List[Any] = latent_model_input[: len(lowercase ) // 2]
A_ : List[str] = torch.cat([half, half] , dim=0 )
A_ : Any = self.scheduler.scale_model_input(lowercase , lowercase )
A_ : Tuple = t
if not torch.is_tensor(lowercase ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
A_ : Optional[Any] = latent_model_input.device.type == """mps"""
if isinstance(lowercase , lowercase ):
                    A_ : Optional[Any] = torch.float32 if is_mps else torch.float64
else:
                    A_ : List[Any] = torch.int32 if is_mps else torch.int64
A_ : List[Any] = torch.tensor([timesteps] , dtype=lowercase , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
A_ : List[Any] = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
A_ : int = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
A_ : List[Any] = self.transformer(
lowercase , timestep=lowercase , class_labels=lowercase ).sample
# perform guidance
if guidance_scale > 1:
A_, A_ : Any = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
A_, A_ : List[Any] = torch.split(lowercase , len(lowercase ) // 2 , dim=0 )
A_ : Optional[Any] = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
A_ : str = torch.cat([half_eps, half_eps] , dim=0 )
A_ : Optional[int] = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
A_, A_ : int = torch.split(lowercase , lowercase , dim=1 )
else:
A_ : Optional[int] = noise_pred
# compute previous image: x_t -> x_t-1
A_ : Union[str, Any] = self.scheduler.step(lowercase , lowercase , lowercase ).prev_sample
if guidance_scale > 1:
A_, A_ : int = latent_model_input.chunk(2 , dim=0 )
else:
A_ : Union[str, Any] = latent_model_input
A_ : Union[str, Any] = 1 / self.vae.config.scaling_factor * latents
A_ : List[Any] = self.vae.decode(lowercase ).sample
A_ : List[str] = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A_ : Union[str, Any] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
A_ : int = self.numpy_to_pil(lowercase )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=lowercase ) | 667 | 0 |
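# Hedged usage sketch for the pipeline above. In upstream diffusers this class is
# `DiTPipeline` and "facebook/DiT-XL-2-256" is a published checkpoint; treat both
# names as assumptions in the context of this file.
import torch
from diffusers import DiTPipeline
pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16).to("cuda")
class_ids = pipe.get_label_ids(["white shark"])   # ImageNet label text -> class id, via the labels dict built in __init__
image = pipe(class_labels=class_ids, guidance_scale=4.0, num_inference_steps=25).images[0]
image.save("shark.png")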
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class _snake_case :
def __init__( self , SCREAMING_SNAKE_CASE_ , ):
'''simple docstring'''
lowercase__ : Any = parent
lowercase__ : Optional[Any] = 13
lowercase__ : Any = 7
lowercase__ : Optional[Any] = 30
lowercase__ : int = self.seq_length + self.mem_len
lowercase__ : str = 15
lowercase__ : int = True
lowercase__ : Union[str, Any] = True
lowercase__ : Optional[Any] = 99
lowercase__ : Any = [10, 50, 80]
lowercase__ : str = 32
lowercase__ : Tuple = 32
lowercase__ : int = 4
lowercase__ : Tuple = 8
lowercase__ : Optional[int] = 1_28
lowercase__ : Any = 2
lowercase__ : Optional[int] = 2
lowercase__ : List[Any] = None
lowercase__ : Union[str, Any] = 1
lowercase__ : List[Any] = 0
lowercase__ : Union[str, Any] = 3
lowercase__ : Tuple = self.vocab_size - 1
lowercase__ : Union[str, Any] = 0.0_1
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowercase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowercase__ : List[str] = None
if self.use_labels:
lowercase__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowercase__ : int = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def lowercase__ ( self):
'''simple docstring'''
random.seed(self.seed)
tf.random.set_seed(self.seed)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : List[str] = TFTransfoXLModel(SCREAMING_SNAKE_CASE_)
lowercase__ , lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE_).to_tuple()
lowercase__ : List[Any] = {"""input_ids""": input_ids_a, """mems""": mems_a}
lowercase__ , lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE_).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Union[str, Any] = TFTransfoXLLMHeadModel(SCREAMING_SNAKE_CASE_)
lowercase__ , lowercase__ : Dict = model(SCREAMING_SNAKE_CASE_).to_tuple()
lowercase__ : int = {"""input_ids""": input_ids_a, """labels""": lm_labels}
lowercase__ , lowercase__ : str = model(SCREAMING_SNAKE_CASE_).to_tuple()
lowercase__ , lowercase__ : Dict = model([input_ids_a, mems_a]).to_tuple()
lowercase__ : Tuple = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels}
lowercase__ , lowercase__ : Dict = model(SCREAMING_SNAKE_CASE_).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : str = TFTransfoXLForSequenceClassification(SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Union[str, Any] = self.prepare_config_and_inputs()
((lowercase__) , (lowercase__) , (lowercase__) , (lowercase__)) : Any = config_and_inputs
lowercase__ : Any = {"""input_ids""": input_ids_a}
return config, inputs_dict
@require_tf
class _snake_case ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
__lowerCAmelCase : List[Any] = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
__lowerCAmelCase : Union[str, Any] = () if is_tf_available() else ()
__lowerCAmelCase : Optional[int] = (
{
'feature-extraction': TFTransfoXLModel,
'text-classification': TFTransfoXLForSequenceClassification,
'text-generation': TFTransfoXLLMHeadModel,
'zero-shot': TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
__lowerCAmelCase : Any = False
__lowerCAmelCase : Dict = False
__lowerCAmelCase : Union[str, Any] = False
__lowerCAmelCase : List[Any] = False
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Any = TFTransfoXLModelTester(self)
lowercase__ : int = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , d_embed=37)
def lowercase__ ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase__ ( self):
'''simple docstring'''
self.model_tester.set_seed()
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
self.model_tester.set_seed()
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[str] = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
lowercase__ : Optional[Any] = model_class(SCREAMING_SNAKE_CASE_)
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer)
if model_class in list_other_models_with_output_ebd:
lowercase__ : Tuple = model.get_output_embeddings()
assert isinstance(SCREAMING_SNAKE_CASE_ , tf.keras.layers.Layer)
lowercase__ : List[Any] = model.get_bias()
assert name is None
else:
lowercase__ : Union[str, Any] = model.get_output_embeddings()
assert x is None
lowercase__ : Optional[Any] = model.get_bias()
assert name is None
def lowercase__ ( self):
'''simple docstring'''
pass
@slow
def lowercase__ ( self):
'''simple docstring'''
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Union[str, Any] = TFTransfoXLModel.from_pretrained(SCREAMING_SNAKE_CASE_)
self.assertIsNotNone(SCREAMING_SNAKE_CASE_)
@unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""")
def lowercase__ ( self):
'''simple docstring'''
pass
@require_tf
class _snake_case ( unittest.TestCase ):
@unittest.skip("""Skip test until #12651 is resolved.""")
@slow
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : List[str] = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""")
# fmt: off
lowercase__ : int = tf.convert_to_tensor([[33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0]] , dtype=tf.intaa) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
lowercase__ : Optional[int] = [33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0,33,1,18_57,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,28,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
lowercase__ : List[Any] = model.generate(SCREAMING_SNAKE_CASE_ , max_length=2_00 , do_sample=SCREAMING_SNAKE_CASE_)
self.assertListEqual(output_ids[0].numpy().tolist() , SCREAMING_SNAKE_CASE_)
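# Sketch of the memory mechanism these tests exercise: Transformer-XL returns `mems`
# that can be fed back in to extend the effective context. The checkpoint name
# "transfo-xl-wt103" is the one used in the slow test above; otherwise hedged.
import tensorflow as tf
from transformers import TransfoXLTokenizer, TFTransfoXLLMHeadModel
tokenizer = TransfoXLTokenizer.from_pretrained("transfo-xl-wt103")
model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
first = tf.constant([tokenizer.encode("The first chunk of a long document")])
out = model(first)                                   # out.mems caches per-layer states
second = tf.constant([tokenizer.encode("and the chunk that follows it")])
out = model(input_ids=second, mems=out.mems)         # context now spans both chunks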
| 12 |
'''simple docstring'''
import math
BALLS_PER_COLOUR = 1_0
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
def solution ( num_picked = 20 ):
    '''simple docstring'''
    total = math.comb(NUM_BALLS , num_picked )
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR , num_picked )
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f'{result:.9f}'
if __name__ == "__main__":
    print(solution(2_0)) | 667 | 0 |
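# Why solution() works (linearity of expectation): with 7 colours of 10 balls each,
# a given colour is absent from a 20-ball draw with probability C(60, 20) / C(70, 20),
# so E[distinct colours] = 7 * (1 - C(60, 20) / C(70, 20)).
import math
p_missing = math.comb(60, 20) / math.comb(70, 20)
assert f"{7 * (1 - p_missing):.9f}" == "6.818741802"  # matches solution(20)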
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ : Tuple = logging.get_logger(__name__)
A__ : Dict = {
"""google/owlvit-base-patch32""": """https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json""",
"""google/owlvit-base-patch16""": """https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json""",
"""google/owlvit-large-patch14""": """https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json""",
}
class UpperCAmelCase_ (_UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase : str = 'owlvit_text_model'
def __init__( self , SCREAMING_SNAKE_CASE_=4_94_08 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=20_48 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=8 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_="quick_gelu" , SCREAMING_SNAKE_CASE_=1E-5 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1.0 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=4_94_06 , SCREAMING_SNAKE_CASE_=4_94_07 , **SCREAMING_SNAKE_CASE_ , ) -> Tuple:
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[Any] = vocab_size
__lowerCamelCase : List[Any] = hidden_size
__lowerCamelCase : int = intermediate_size
__lowerCamelCase : Any = num_hidden_layers
__lowerCamelCase : Dict = num_attention_heads
__lowerCamelCase : List[Any] = max_position_embeddings
__lowerCamelCase : str = hidden_act
__lowerCamelCase : Optional[Any] = layer_norm_eps
__lowerCamelCase : Tuple = attention_dropout
__lowerCamelCase : Tuple = initializer_range
__lowerCamelCase : Optional[Any] = initializer_factor
@classmethod
def lowercase_ ( cls , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase , __lowerCamelCase : Any = cls.get_config_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get('model_type' ) == "owlvit":
__lowerCamelCase : str = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
class UpperCAmelCase_ (_UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase : Tuple = 'owlvit_vision_model'
def __init__( self , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=30_72 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_="quick_gelu" , SCREAMING_SNAKE_CASE_=1E-5 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1.0 , **SCREAMING_SNAKE_CASE_ , ) -> Tuple:
super().__init__(**SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : str = hidden_size
__lowerCamelCase : str = intermediate_size
__lowerCamelCase : Union[str, Any] = num_hidden_layers
__lowerCamelCase : Tuple = num_attention_heads
__lowerCamelCase : str = num_channels
__lowerCamelCase : Dict = image_size
__lowerCamelCase : str = patch_size
__lowerCamelCase : Tuple = hidden_act
__lowerCamelCase : List[str] = layer_norm_eps
__lowerCamelCase : Tuple = attention_dropout
__lowerCamelCase : Optional[int] = initializer_range
__lowerCamelCase : Any = initializer_factor
@classmethod
def lowercase_ ( cls , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase , __lowerCamelCase : Optional[Any] = cls.get_config_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get('model_type' ) == "owlvit":
__lowerCamelCase : Tuple = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
class UpperCAmelCase_ (_UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase : Optional[int] = 'owlvit'
lowerCamelCase : Dict = True
def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=2.6_5_9_2 , SCREAMING_SNAKE_CASE_=True , **SCREAMING_SNAKE_CASE_ , ) -> Optional[int]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
if text_config is None:
__lowerCamelCase : Dict = {}
logger.info('text_config is None. Initializing the OwlViTTextConfig with default values.' )
if vision_config is None:
__lowerCamelCase : Optional[Any] = {}
            logger.info('vision_config is None. Initializing the OwlViTVisionConfig with default values.' )
__lowerCamelCase : List[str] = OwlViTTextConfig(**SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[Any] = OwlViTVisionConfig(**SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Any = projection_dim
__lowerCamelCase : List[str] = logit_scale_init_value
__lowerCamelCase : List[Any] = return_dict
__lowerCamelCase : str = 1.0
@classmethod
def lowercase_ ( cls , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase , __lowerCamelCase : str = cls.get_config_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
@classmethod
def lowercase_ ( cls , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Any:
__lowerCamelCase : Tuple = {}
__lowerCamelCase : Optional[int] = text_config
__lowerCamelCase : Dict = vision_config
return cls.from_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> str:
__lowerCamelCase : int = copy.deepcopy(self.__dict__ )
__lowerCamelCase : Optional[Any] = self.text_config.to_dict()
__lowerCamelCase : Any = self.vision_config.to_dict()
__lowerCamelCase : str = self.__class__.model_type
return output
class UpperCAmelCase_ (_UpperCAmelCase ):
"""simple docstring"""
@property
def lowercase_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
] )
@property
def lowercase_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('logits_per_image', {0: 'batch'}),
('logits_per_text', {0: 'batch'}),
('text_embeds', {0: 'batch'}),
('image_embeds', {0: 'batch'}),
] )
@property
def lowercase_ ( self ) -> float:
return 1E-4
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = -1 , SCREAMING_SNAKE_CASE_ = -1 , SCREAMING_SNAKE_CASE_ = None , ) -> Mapping[str, Any]:
__lowerCamelCase : Optional[int] = super().generate_dummy_inputs(
processor.tokenizer , batch_size=SCREAMING_SNAKE_CASE_ , seq_length=SCREAMING_SNAKE_CASE_ , framework=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : str = super().generate_dummy_inputs(
processor.image_processor , batch_size=SCREAMING_SNAKE_CASE_ , framework=SCREAMING_SNAKE_CASE_ )
return {**text_input_dict, **image_input_dict}
@property
def lowercase_ ( self ) -> int:
return 14
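# Composition sketch for the three config classes above (their upstream names are
# OwlViTTextConfig / OwlViTVisionConfig / OwlViTConfig; note from_text_vision_configs
# takes plain dicts, exactly as the classmethod above is written):
from transformers import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig
text_cfg = OwlViTTextConfig(num_hidden_layers=6)
vision_cfg = OwlViTVisionConfig(patch_size=16)
cfg = OwlViTConfig.from_text_vision_configs(text_cfg.to_dict(), vision_cfg.to_dict())
assert cfg.text_config.num_hidden_layers == 6 and cfg.vision_config.patch_size == 16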
| 13 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase :List[Any] = logging.get_logger(__name__)
lowerCamelCase :Union[str, Any] = {
'''google/pix2struct-textcaps-base''': (
'''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'''
),
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'pix2struct_text_model'
__SCREAMING_SNAKE_CASE : Optional[int] = ['past_key_values']
__SCREAMING_SNAKE_CASE : List[Any] = {
'hidden_size': 'hidden_size',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__(self , lowercase=50244 , lowercase=768 , lowercase=64 , lowercase=2048 , lowercase=12 , lowercase=12 , lowercase=32 , lowercase=128 , lowercase=0.1 , lowercase=1E-6 , lowercase=1.0 , lowercase="gelu_new" , lowercase=0 , lowercase=False , lowercase=0 , lowercase=1 , lowercase=False , lowercase=True , **lowercase , ):
A_ : Tuple = vocab_size
A_ : str = hidden_size
A_ : Optional[Any] = d_kv
A_ : Tuple = d_ff
A_ : str = num_layers
A_ : int = num_heads
A_ : Dict = relative_attention_num_buckets
A_ : Optional[Any] = relative_attention_max_distance
A_ : Dict = dropout_rate
A_ : Optional[int] = layer_norm_epsilon
A_ : Dict = initializer_factor
A_ : Any = use_cache
A_ : int = eos_token_id
A_ : Tuple = decoder_start_token_id
# for backwards compatibility
A_ : str = dense_act_fn
super().__init__(
pad_token_id=lowercase , eos_token_id=lowercase , decoder_start_token_id=lowercase , tie_word_embeddings=lowercase , is_decoder=lowercase , **lowercase , )
@classmethod
def _a (cls , lowercase , **lowercase ):
cls._set_token_in_kwargs(lowercase )
A_, A_ : List[str] = cls.get_config_dict(lowercase , **lowercase )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
A_ : int = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(lowercase , **lowercase )
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : int = 'pix2struct_vision_model'
def __init__(self , lowercase=768 , lowercase=768 , lowercase=2048 , lowercase=64 , lowercase=12 , lowercase=12 , lowercase="gelu_new" , lowercase=1E-6 , lowercase=0.0 , lowercase=0.0 , lowercase=1E-10 , lowercase=1.0 , lowercase=4096 , lowercase=32 , lowercase=128 , **lowercase , ):
super().__init__(**lowercase )
A_ : List[str] = hidden_size
A_ : Optional[int] = patch_embed_hidden_size
A_ : Any = d_ff
A_ : str = dropout_rate
A_ : Dict = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : List[Any] = initializer_range
A_ : List[str] = initializer_factor
A_ : Dict = attention_dropout
A_ : Optional[Any] = layer_norm_eps
A_ : Optional[Any] = dense_act_fn
A_ : List[Any] = seq_len
A_ : Tuple = relative_attention_num_buckets
A_ : Any = relative_attention_max_distance
A_ : int = d_kv
@classmethod
def _a (cls , lowercase , **lowercase ):
cls._set_token_in_kwargs(lowercase )
A_, A_ : List[Any] = cls.get_config_dict(lowercase , **lowercase )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
A_ : Tuple = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(lowercase , **lowercase )
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Any = 'pix2struct'
__SCREAMING_SNAKE_CASE : List[Any] = True
def __init__(self , lowercase=None , lowercase=None , lowercase=1.0 , lowercase=0.02 , lowercase=False , lowercase=False , lowercase=True , **lowercase , ):
super().__init__(tie_word_embeddings=lowercase , is_encoder_decoder=lowercase , **lowercase )
if text_config is None:
A_ : Optional[Any] = {}
logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""" )
if vision_config is None:
A_ : Tuple = {}
logger.info("""vision_config is None. Initializing the Pix2StructVisionConfig with default values.""" )
A_ : Tuple = PixaStructTextConfig(**lowercase )
A_ : List[str] = PixaStructVisionConfig(**lowercase )
A_ : Dict = self.text_config.decoder_start_token_id
A_ : Union[str, Any] = self.text_config.pad_token_id
A_ : str = self.text_config.eos_token_id
A_ : List[str] = initializer_factor
A_ : int = initializer_range
A_ : Tuple = self.initializer_range
A_ : Tuple = self.initializer_range
A_ : List[str] = is_vqa
@classmethod
def _a (cls , lowercase , lowercase , **lowercase ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase )
def _a (self ):
A_ : Optional[Any] = copy.deepcopy(self.__dict__ )
A_ : str = self.text_config.to_dict()
A_ : List[Any] = self.vision_config.to_dict()
A_ : List[str] = self.__class__.model_type
return output | 667 | 0 |
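# The same composed-config pattern for Pix2Struct (upstream names Pix2StructTextConfig /
# Pix2StructVisionConfig / Pix2StructConfig; here from_text_vision_configs takes config
# objects and calls .to_dict() itself, mirroring the classmethod above):
from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig
cfg = Pix2StructConfig.from_text_vision_configs(
    Pix2StructTextConfig(num_layers=6), Pix2StructVisionConfig(num_hidden_layers=6)
)
# pad/eos/decoder-start token ids are lifted from the text sub-config in __init__ above
assert cfg.pad_token_id == cfg.text_config.pad_token_id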
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Any = "convbert"
def __init__( self , _a=3_0_5_2_2 , _a=7_6_8 , _a=1_2 , _a=1_2 , _a=3_0_7_2 , _a="gelu" , _a=0.1 , _a=0.1 , _a=5_1_2 , _a=2 , _a=0.02 , _a=1e-1_2 , _a=1 , _a=0 , _a=2 , _a=7_6_8 , _a=2 , _a=9 , _a=1 , _a=None , **_a , ) -> Tuple:
super().__init__(
pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a , )
_a : Optional[int] = vocab_size
_a : str = hidden_size
_a : str = num_hidden_layers
_a : List[Any] = num_attention_heads
_a : int = intermediate_size
_a : List[str] = hidden_act
_a : Union[str, Any] = hidden_dropout_prob
_a : Optional[Any] = attention_probs_dropout_prob
_a : int = max_position_embeddings
_a : str = type_vocab_size
_a : Union[str, Any] = initializer_range
_a : Union[str, Any] = layer_norm_eps
_a : str = embedding_size
_a : Tuple = head_ratio
_a : Tuple = conv_kernel_size
_a : Union[str, Any] = num_groups
_a : Dict = classifier_dropout
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
@property
def __lowercase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_a : Tuple = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
_a : Tuple = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
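# Sketch of what the ONNX config above resolves to (assumes the upstream names
# ConvBertConfig / ConvBertOnnxConfig; the `task` string selects the dynamic axes):
from transformers import ConvBertConfig
from transformers.models.convbert.configuration_convbert import ConvBertOnnxConfig
onnx_cfg = ConvBertOnnxConfig(ConvBertConfig(), task="multiple-choice")
print(onnx_cfg.inputs["input_ids"])   # {0: 'batch', 1: 'choice', 2: 'sequence'}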
| 14 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
'''configuration_audio_spectrogram_transformer''': [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ASTConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ASTForAudioClassification''',
'''ASTModel''',
'''ASTPreTrainedModel''',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ['''ASTFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 667 | 0 |
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def UpperCamelCase ( __magic_name__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
lowercase__ = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""_float_tensor""",
"""decoder.output_projection.weight""",
]
for k in ignore_keys:
state_dict.pop(__magic_name__ , __magic_name__ )
def UpperCamelCase ( __magic_name__ : int ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ , lowercase__ = emb.weight.shape
lowercase__ = nn.Linear(__magic_name__ , __magic_name__ , bias=__magic_name__ )
lowercase__ = emb.weight.data
return lin_layer
def UpperCamelCase ( __magic_name__ : str , __magic_name__ : str="facebook/mbart-large-en-ro" , __magic_name__ : Any=False , __magic_name__ : int=False ) -> Optional[int]:
"""simple docstring"""
lowercase__ = torch.load(__magic_name__ , map_location="""cpu""" )["""model"""]
remove_ignore_keys_(__magic_name__ )
lowercase__ = state_dict["""encoder.embed_tokens.weight"""].shape[0]
lowercase__ = MBartConfig.from_pretrained(__magic_name__ , vocab_size=__magic_name__ )
if mbart_aa and finetuned:
lowercase__ = """relu"""
lowercase__ = state_dict["""decoder.embed_tokens.weight"""]
lowercase__ = MBartForConditionalGeneration(__magic_name__ )
model.model.load_state_dict(__magic_name__ )
if finetuned:
lowercase__ = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
A : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config',
default='facebook/mbart-large-cc25',
type=str,
help='Which huggingface architecture to use: mbart-large',
)
    parser.add_argument('--mbart_50', action='store_true', help='whether the model is an mBART-50 checkpoint')
parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
A : Any = parser.parse_args()
A : str = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_50
)
model.save_pretrained(args.pytorch_dump_folder_path)
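# Example invocation (paths are placeholders; the flags are exactly the argparse
# definitions above, and the script name is the upstream one, stated as an assumption):
#
#   python convert_mbart_original_checkpoint_to_pytorch.py \
#       /path/to/fairseq/model.pt ./mbart-converted \
#       --hf_config facebook/mbart-large-cc25 --finetuned --mbart_50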
| 15 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : List[str] = ['image_processor', 'tokenizer']
__SCREAMING_SNAKE_CASE : Any = 'LayoutLMv3ImageProcessor'
__SCREAMING_SNAKE_CASE : Any = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__(self , lowercase=None , lowercase=None , **lowercase ):
A_ : Dict = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , lowercase , )
A_ : List[str] = kwargs.pop("""feature_extractor""" )
A_ : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(lowercase , lowercase )
def __call__(self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = True , lowercase = False , lowercase = None , lowercase = None , lowercase = 0 , lowercase = None , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = True , lowercase = None , **lowercase , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"""You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.""" )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"""You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" )
# first, apply the image processor
A_ : Optional[int] = self.image_processor(images=lowercase , return_tensors=lowercase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(lowercase , lowercase ):
A_ : Union[str, Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
A_ : Dict = features["""words"""]
A_ : Optional[int] = self.tokenizer(
text=text if text is not None else features["""words"""] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["""boxes"""] , word_labels=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_token_type_ids=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , )
# add pixel values
A_ : List[Any] = features.pop("""pixel_values""" )
if return_overflowing_tokens is True:
A_ : List[str] = self.get_overflowing_images(lowercase , encoded_inputs["""overflow_to_sample_mapping"""] )
A_ : Optional[int] = images
return encoded_inputs
def _a (self , lowercase , lowercase ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
A_ : str = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(lowercase ) != len(lowercase ):
raise ValueError(
"""Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"""
F' {len(lowercase )} and {len(lowercase )}' )
return images_with_overflow
def _a (self , *lowercase , **lowercase ):
return self.tokenizer.batch_decode(*lowercase , **lowercase )
def _a (self , *lowercase , **lowercase ):
return self.tokenizer.decode(*lowercase , **lowercase )
@property
def _a (self ):
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def _a (self ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowercase , )
return self.image_processor_class
@property
def _a (self ):
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , lowercase , )
return self.image_processor | 667 | 0 |
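# Hedged end-to-end sketch for the processor above: with the default apply_ocr=True
# image processor, words/boxes must NOT be passed, exactly as the guards in __call__
# enforce ("microsoft/layoutlmv3-base" is a published checkpoint; the image path is a
# placeholder):
from PIL import Image
from transformers import LayoutLMv3Processor
processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
image = Image.open("invoice.png").convert("RGB")
encoding = processor(image, return_tensors="pt")
print(sorted(encoding.keys()))  # ['attention_mask', 'bbox', 'input_ids', 'pixel_values']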
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : Any ):
if self.framework == "pytorch":
subprocess.run(
f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding="utf-8" , check=__lowerCamelCase , )
assert hasattr(self , "env" )
def _snake_case ( self : Optional[int] , __lowerCamelCase : int ):
# configuration for running training on smdistributed Model Parallel
SCREAMING_SNAKE_CASE = {
"enabled": True,
"processes_per_host": 8,
}
SCREAMING_SNAKE_CASE = {
"enabled": True,
"parameters": {
"microbatches": 4,
"placement_strategy": "spread",
"pipeline": "interleaved",
"optimize": "speed",
"partitions": 4,
"ddp": True,
},
}
SCREAMING_SNAKE_CASE = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
SCREAMING_SNAKE_CASE = "trainer" if self.script == "run_glue.py" else "smtrainer"
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}" , instance_count=__lowerCamelCase , instance_type=self.instance_type , debugger_hook_config=__lowerCamelCase , hyperparameters={
**self.env.hyperparameters,
"model_name_or_path": self.model_name_or_path,
"max_steps": 500,
} , metric_definitions=self.env.metric_definitions , distribution=__lowerCamelCase , py_version="py36" , )
def _snake_case ( self : Optional[int] , __lowerCamelCase : List[str] ):
TrainingJobAnalytics(__lowerCamelCase ).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv" )
@parameterized.expand([(1,)] )
def _snake_case ( self : Dict , __lowerCamelCase : Optional[int] ):
# create estimator
SCREAMING_SNAKE_CASE = self.create_estimator(__lowerCamelCase )
# run training
estimator.fit()
# result dataframe
SCREAMING_SNAKE_CASE = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
SCREAMING_SNAKE_CASE = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
SCREAMING_SNAKE_CASE = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
SCREAMING_SNAKE_CASE = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 999999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"{estimator.latest_training_job.name}.json" , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , __lowerCamelCase ) | 16 |
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
lowerCamelCase :Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(__UpperCAmelCase )
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , **lowercase ):
super().__init__(**lowercase )
requires_backends(self , """vision""" )
requires_backends(self , """torch""" )
if self.framework != "pt":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
self.check_model_type(lowercase )
def _a (self , **lowercase ):
A_ : str = {}
A_ : Dict = {}
A_ : str = {}
# preprocess args
if "points_per_batch" in kwargs:
A_ : Dict = kwargs["""points_per_batch"""]
if "points_per_crop" in kwargs:
A_ : int = kwargs["""points_per_crop"""]
if "crops_n_layers" in kwargs:
A_ : str = kwargs["""crops_n_layers"""]
if "crop_overlap_ratio" in kwargs:
A_ : int = kwargs["""crop_overlap_ratio"""]
if "crop_n_points_downscale_factor" in kwargs:
A_ : Tuple = kwargs["""crop_n_points_downscale_factor"""]
# postprocess args
if "pred_iou_thresh" in kwargs:
A_ : Any = kwargs["""pred_iou_thresh"""]
if "stability_score_offset" in kwargs:
A_ : Optional[int] = kwargs["""stability_score_offset"""]
if "mask_threshold" in kwargs:
A_ : Union[str, Any] = kwargs["""mask_threshold"""]
if "stability_score_thresh" in kwargs:
A_ : List[str] = kwargs["""stability_score_thresh"""]
if "crops_nms_thresh" in kwargs:
A_ : Union[str, Any] = kwargs["""crops_nms_thresh"""]
if "output_rle_mask" in kwargs:
A_ : List[Any] = kwargs["""output_rle_mask"""]
if "output_bboxes_mask" in kwargs:
A_ : Union[str, Any] = kwargs["""output_bboxes_mask"""]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__(self , lowercase , *lowercase , lowercase=None , lowercase=None , **lowercase ):
return super().__call__(lowercase , *lowercase , num_workers=lowercase , batch_size=lowercase , **lowercase )
def _a (self , lowercase , lowercase=64 , lowercase = 0 , lowercase = 512 / 1500 , lowercase = 32 , lowercase = 1 , ):
A_ : Tuple = load_image(lowercase )
A_ : int = self.image_processor.size["""longest_edge"""]
A_, A_, A_, A_ : str = self.image_processor.generate_crop_boxes(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
A_ : Dict = self.image_processor(images=lowercase , return_tensors="""pt""" )
with self.device_placement():
if self.framework == "pt":
A_ : Optional[Any] = self.get_inference_context()
with inference_context():
A_ : str = self._ensure_tensor_on_device(lowercase , device=self.device )
A_ : Tuple = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
A_ : Tuple = image_embeddings
A_ : Dict = grid_points.shape[1]
A_ : Optional[Any] = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"""Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
"""To return all points at once, set points_per_batch to None""" )
for i in range(0 , lowercase , lowercase ):
A_ : Tuple = grid_points[:, i : i + points_per_batch, :, :]
A_ : List[Any] = input_labels[:, i : i + points_per_batch]
A_ : Optional[Any] = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def _a (self , lowercase , lowercase=0.88 , lowercase=0.95 , lowercase=0 , lowercase=1 , ):
A_ : Any = model_inputs.pop("""input_boxes""" )
A_ : str = model_inputs.pop("""is_last""" )
A_ : int = model_inputs.pop("""original_sizes""" ).tolist()
A_ : int = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
A_ : List[str] = self.model(**lowercase )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
A_ : Optional[int] = model_outputs["""pred_masks"""]
A_ : Tuple = self.image_processor.post_process_masks(
lowercase , lowercase , lowercase , lowercase , binarize=lowercase )
A_ : Union[str, Any] = model_outputs["""iou_scores"""]
A_, A_, A_ : Tuple = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , lowercase , lowercase , lowercase , lowercase , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def _a (self , lowercase , lowercase=False , lowercase=False , lowercase=0.7 , ):
A_ : Tuple = []
A_ : Optional[Any] = []
A_ : str = []
for model_output in model_outputs:
all_scores.append(model_output.pop("""iou_scores""" ) )
all_masks.extend(model_output.pop("""masks""" ) )
all_boxes.append(model_output.pop("""boxes""" ) )
A_ : Any = torch.cat(lowercase )
A_ : List[Any] = torch.cat(lowercase )
A_, A_, A_, A_ : Optional[int] = self.image_processor.post_process_for_mask_generation(
lowercase , lowercase , lowercase , lowercase )
A_ : int = defaultdict(lowercase )
for output in model_outputs:
for k, v in output.items():
extra[k].append(lowercase )
A_ : Optional[int] = {}
if output_rle_mask:
A_ : List[str] = rle_mask
if output_bboxes_mask:
A_ : Optional[int] = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra} | 667 | 0 |
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ : Optional[int] = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class lowerCamelCase_ ( _lowercase , unittest.TestCase ):
_lowercase : Tuple = XLNetTokenizer
_lowercase : str = XLNetTokenizerFast
_lowercase : str = True
_lowercase : List[Any] = True
def lowerCAmelCase_ ( self : str ):
super().setUp()
# We have a SentencePiece fixture for testing
__A : int = XLNetTokenizer(__A , keep_accents=__A )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase_ ( self : Union[str, Any] ):
__A : int = """<s>"""
__A : Any = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__A ) , __A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__A ) , __A )
def lowerCAmelCase_ ( self : Optional[int] ):
__A : Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """<eod>""" )
self.assertEqual(len(__A ) , 1006 )
def lowerCAmelCase_ ( self : Tuple ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def lowerCAmelCase_ ( self : str ):
__A : Any = XLNetTokenizer(__A , keep_accents=__A )
__A : Optional[Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , [285, 46, 10, 170, 382] )
__A : Union[str, Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
__A : List[str] = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(__A , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
__A : str = tokenizer.convert_ids_to_tokens(__A )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def lowerCAmelCase_ ( self : Union[str, Any] ):
__A : Union[str, Any] = XLNetTokenizer(__A , do_lower_case=__A )
__A : str = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""▁he""", """ll""", """o"""] )
def lowerCAmelCase_ ( self : int ):
__A : Optional[Any] = XLNetTokenizer(__A , do_lower_case=__A )
__A : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
@slow
def lowerCAmelCase_ ( self : str ):
__A : Optional[int] = XLNetTokenizer.from_pretrained("""xlnet-base-cased""" )
__A : Any = tokenizer.encode("""sequence builders""" , add_special_tokens=__A )
__A : Optional[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__A )
__A : List[str] = tokenizer.build_inputs_with_special_tokens(__A )
__A : Any = tokenizer.build_inputs_with_special_tokens(__A , __A )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def lowerCAmelCase_ ( self : Union[str, Any] ):
# fmt: off
__A : Dict = {"""input_ids""": [[17, 2_1442, 270, 17, 10, 1_4645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 2_2018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 1_4431, 13, 5500, 11, 1176, 580, 13, 1_6819, 4797, 23, 17, 10, 1_7135, 658, 19, 457, 7932, 13, 184, 19, 3154, 1_7135, 6468, 19, 1404, 1_2269, 19, 4229, 5356, 1_6264, 46, 19, 17, 2_0545, 1_0395, 9, 9, 9, 11, 28, 6421, 9531, 2_0729, 17, 10, 353, 1_7022, 11, 21, 6421, 9531, 1_6949, 17, 10, 1_1509, 753, 11, 33, 95, 2421, 7385, 956, 1_4431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 2_4738, 19, 1_3203, 658, 218, 787, 21, 430, 1_8482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 2_2178, 27, 1064, 22, 956, 13, 1_1101, 1429, 5854, 2_4313, 1_8953, 40, 422, 2_4366, 68, 1758, 37, 1_0483, 1_4257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 1_3894, 3380, 23, 95, 18, 1_7634, 2288, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__A , model_name="""xlnet-base-cased""" , revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" , )
| 17 |
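As a reading aid for the `build_inputs_with_special_tokens` assertions in the slow test above: XLNet appends its special tokens at the end of the sequence (in this vocabulary, id 4 is `<sep>` and id 3 is `<cls>`). A sketch of the layout being checked:

# single sequence: tokens_a + [4, 3]                    i.e. "... <sep> <cls>"
# sequence pair:   tokens_a + [4] + tokens_b + [4, 3]   i.e. "A <sep> B <sep> <cls>"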
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Approximate the solution of dy/dx = ode_func(x, y) on [x0, x_end] with
    the explicit midpoint (modified Euler / Heun) method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # predictor: a plain Euler step
        y_predict = y[k] + step_size * ode_func(x, y[k])
        # corrector: average the slopes at the two ends of the step
        y[k + 1] = y[k] + (step_size / 2) * (
            ode_func(x, y[k]) + ode_func(x + step_size, y_predict)
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod() | 667 | 0 |
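A quick numerical check of the integrator above, written as a sketch against the reconstructed `euler_modified` signature; it integrates dy/dx = y, whose exact solution at x = 1 is e.

import numpy as np

def dydx(x: float, y: float) -> float:
    return y  # dy/dx = y has the exact solution y(x) = exp(x)

approx = euler_modified(dydx, 1.0, 0.0, 0.01, 1.0)
print(approx[-1])              # close to e = 2.71828...
print(abs(approx[-1] - np.e))  # global error shrinks as O(step_size**2)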
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
_SCREAMING_SNAKE_CASE = "\\n Text data.\n Second line of data."
_SCREAMING_SNAKE_CASE = "file"
@pytest.fixture(scope="session" )
def __a(SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
'''simple docstring'''
_lowerCAmelCase = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
_lowerCAmelCase = bytes(SCREAMING_SNAKE_CASE_ , "utf-8" )
with zstd.open(SCREAMING_SNAKE_CASE_ , "wb" ) as f:
f.write(SCREAMING_SNAKE_CASE_ )
return path
@pytest.fixture
def __a(SCREAMING_SNAKE_CASE_ : Dict ):
'''simple docstring'''
with open(os.path.join(tmpfs.local_root_dir , SCREAMING_SNAKE_CASE_ ) , "w" ) as f:
f.write(SCREAMING_SNAKE_CASE_ )
return FILE_PATH
@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def __a(SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Dict ):
'''simple docstring'''
_lowerCAmelCase = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
_lowerCAmelCase = input_paths[compression_format]
_lowerCAmelCase = tmp_path / "cache"
_lowerCAmelCase = DownloadConfig(cache_dir=SCREAMING_SNAKE_CASE_ , extract_compressed_file=SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = cached_path(SCREAMING_SNAKE_CASE_ , download_config=SCREAMING_SNAKE_CASE_ )
with open(SCREAMING_SNAKE_CASE_ ) as f:
_lowerCAmelCase = f.read()
with open(SCREAMING_SNAKE_CASE_ ) as f:
_lowerCAmelCase = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def __a(SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] ):
'''simple docstring'''
_lowerCAmelCase = "custom_cache"
_lowerCAmelCase = "custom_extracted_dir"
_lowerCAmelCase = tmp_path / "custom_extracted_path"
if default_extracted:
_lowerCAmelCase = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , SCREAMING_SNAKE_CASE_ )
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(SCREAMING_SNAKE_CASE_ ) )
_lowerCAmelCase = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
_lowerCAmelCase = xz_file
_lowerCAmelCase = (
DownloadConfig(extract_compressed_file=SCREAMING_SNAKE_CASE_ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=SCREAMING_SNAKE_CASE_ )
)
_lowerCAmelCase = cached_path(SCREAMING_SNAKE_CASE_ , download_config=SCREAMING_SNAKE_CASE_ )
assert Path(SCREAMING_SNAKE_CASE_ ).parent.parts[-2:] == expected
def __a(SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
'''simple docstring'''
_lowerCAmelCase = str(Path(SCREAMING_SNAKE_CASE_ ).resolve() )
assert cached_path(SCREAMING_SNAKE_CASE_ ) == text_file
# relative path
_lowerCAmelCase = str(Path(SCREAMING_SNAKE_CASE_ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(SCREAMING_SNAKE_CASE_ ) == text_file
def __a(SCREAMING_SNAKE_CASE_ : int ):
'''simple docstring'''
_lowerCAmelCase = str(tmp_path.resolve() / "__missing_file__.txt" )
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
cached_path(SCREAMING_SNAKE_CASE_ )
# relative path
_lowerCAmelCase = "./__missing_file__.txt"
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
cached_path(SCREAMING_SNAKE_CASE_ )
def __a(SCREAMING_SNAKE_CASE_ : Any ):
'''simple docstring'''
_lowerCAmelCase = get_from_cache(F'''tmp://{tmpfs_file}''' )
with open(SCREAMING_SNAKE_CASE_ ) as f:
_lowerCAmelCase = f.read()
assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE" , SCREAMING_SNAKE_CASE_ )
def __a():
'''simple docstring'''
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , SCREAMING_SNAKE_CASE_ )
def __a(SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
'''simple docstring'''
_lowerCAmelCase = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
http_get("https://huggingface.co" , temp_file=SCREAMING_SNAKE_CASE_ )
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , SCREAMING_SNAKE_CASE_ )
def __a(SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
'''simple docstring'''
_lowerCAmelCase = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
ftp_get("ftp://huggingface.co" , temp_file=SCREAMING_SNAKE_CASE_ )
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , SCREAMING_SNAKE_CASE_ )
def __a(SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
'''simple docstring'''
_lowerCAmelCase = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
fsspec_get("s3://huggingface.co" , temp_file=SCREAMING_SNAKE_CASE_ )
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
fsspec_head("s3://huggingface.co" )
| 18 |
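A minimal sketch of the `cached_path` extraction behaviour these tests exercise; the archive path and cache directory below are placeholders, not values from the tests.

from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import cached_path

# With extract_compressed_file=True, cached_path returns the path of the
# extracted file rather than the compressed archive itself.
download_config = DownloadConfig(cache_dir="/tmp/datasets_cache", extract_compressed_file=True)
extracted_path = cached_path("/path/to/file.txt.gz", download_config=download_config)
print(extracted_path)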
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)
    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")
    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
# We verify the conversion on an image from the COCO validation set
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilenet_v1_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Copy the TensorFlow checkpoint's weights into our MobileNetV1 structure."""
    config = get_mobilenet_v1_config(model_name)
    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits
    assert logits.shape == (1, 1001)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''mobilenet_v1_1.0_224''',
type=str,
help='''Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.''',
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
    convert_mobilenet_v1_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
) | 667 | 0 |
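For reference, a typical invocation of the conversion script above; the script file name and all paths are placeholders.

# python convert_original_tf_checkpoint_to_pytorch.py \
#     --model_name mobilenet_v1_1.0_224 \
#     --checkpoint_path /path/to/mobilenet_v1_1.0_224.ckpt \
#     --pytorch_dump_folder_path /path/to/output \
#     --push_to_hub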
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def preprocess(image):
    """Convert a PIL image, or a list of PIL images, into a normalized tensor batch."""
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    """Re-noises an input image up to a chosen timestep and denoises it back with DDIM."""

    def __init__(self, unet, scheduler):
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )
        init_latents = image.to(device=device, dtype=dtype)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents

    @torch.no_grad()
    def __call__(
        self,
        image=None,
        strength=0.8,
        batch_size=1,
        generator=None,
        eta=0.0,
        num_inference_steps=50,
        use_clipped_model_output=None,
        output_type="pil",
        return_dict=True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # 1. Check inputs. Raise error if not correct
        self.check_inputs(strength)
        # 2. Preprocess image
        image = preprocess(image)
        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)
        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents
        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in the DDIM paper and should be in [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image, latent_timestep.item())
        return ImagePipelineOutput(images=image)
| 19 |
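A usage sketch for the pipeline above, assuming the reconstructed class name; the checkpoint id is only an example of an unconditional 256x256 diffusion model, and the image paths are placeholders.

import PIL.Image
import torch
from diffusers import DDIMScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ddpm-celebahq-256")
pipe = DDIMNoiseComparativeAnalysisPipeline(unet=unet, scheduler=DDIMScheduler())
pipe.to("cuda" if torch.cuda.is_available() else "cpu")

init_image = PIL.Image.open("input.png")  # placeholder input
images, noising_step = pipe(init_image, strength=0.6, num_inference_steps=50, return_dict=False)
images[0].save("renoised.png")            # placeholder output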
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
lowerCamelCase :List[str] = logging.get_logger(__name__)
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Dict = 'AutoTokenizer'
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['tokenizer']
__SCREAMING_SNAKE_CASE : Tuple = {
'semantic_prompt': 1,
'coarse_prompt': 2,
'fine_prompt': 2,
}
def __init__(self , lowercase , lowercase=None ):
super().__init__(lowercase )
A_ : Any = speaker_embeddings
@classmethod
def _a (cls , lowercase , lowercase="speaker_embeddings_path.json" , **lowercase ):
if speaker_embeddings_dict_path is not None:
A_ : Any = get_file_from_repo(
lowercase , lowercase , subfolder=kwargs.pop("""subfolder""" , lowercase ) , cache_dir=kwargs.pop("""cache_dir""" , lowercase ) , force_download=kwargs.pop("""force_download""" , lowercase ) , proxies=kwargs.pop("""proxies""" , lowercase ) , resume_download=kwargs.pop("""resume_download""" , lowercase ) , local_files_only=kwargs.pop("""local_files_only""" , lowercase ) , use_auth_token=kwargs.pop("""use_auth_token""" , lowercase ) , revision=kwargs.pop("""revision""" , lowercase ) , )
if speaker_embeddings_path is None:
logger.warning(
F'`{os.path.join(lowercase , lowercase )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' )
A_ : str = None
else:
with open(lowercase ) as speaker_embeddings_json:
A_ : List[str] = json.load(lowercase )
else:
A_ : str = None
A_ : int = AutoTokenizer.from_pretrained(lowercase , **lowercase )
return cls(tokenizer=lowercase , speaker_embeddings=lowercase )
def _a (self , lowercase , lowercase="speaker_embeddings_path.json" , lowercase="speaker_embeddings" , lowercase = False , **lowercase , ):
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(lowercase , lowercase , """v2""" ) , exist_ok=lowercase )
A_ : Optional[int] = {}
A_ : Tuple = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
A_ : Union[str, Any] = self._load_voice_preset(lowercase )
A_ : Tuple = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["""repo_or_path"""] , lowercase , F'{prompt_key}_{key}' ) , voice_preset[key] , allow_pickle=lowercase , )
A_ : List[str] = os.path.join(lowercase , F'{prompt_key}_{key}.npy' )
A_ : str = tmp_dict
with open(os.path.join(lowercase , lowercase ) , """w""" ) as fp:
json.dump(lowercase , lowercase )
super().save_pretrained(lowercase , lowercase , **lowercase )
def _a (self , lowercase = None , **lowercase ):
A_ : List[Any] = self.speaker_embeddings[voice_preset]
A_ : Optional[Any] = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' )
A_ : int = get_file_from_repo(
self.speaker_embeddings.get("""repo_or_path""" , """/""" ) , voice_preset_paths[key] , subfolder=kwargs.pop("""subfolder""" , lowercase ) , cache_dir=kwargs.pop("""cache_dir""" , lowercase ) , force_download=kwargs.pop("""force_download""" , lowercase ) , proxies=kwargs.pop("""proxies""" , lowercase ) , resume_download=kwargs.pop("""resume_download""" , lowercase ) , local_files_only=kwargs.pop("""local_files_only""" , lowercase ) , use_auth_token=kwargs.pop("""use_auth_token""" , lowercase ) , revision=kwargs.pop("""revision""" , lowercase ) , )
if path is None:
raise ValueError(
F'`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.' )
A_ : Tuple = np.load(lowercase )
return voice_preset_dict
def _a (self , lowercase = None ):
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F'Voice preset unrecognized, missing {key} as a key.' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
def __call__(self , lowercase=None , lowercase=None , lowercase="pt" , lowercase=256 , lowercase=False , lowercase=True , lowercase=False , **lowercase , ):
if voice_preset is not None and not isinstance(lowercase , lowercase ):
if (
isinstance(lowercase , lowercase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
A_ : Optional[int] = self._load_voice_preset(lowercase )
else:
if isinstance(lowercase , lowercase ) and not voice_preset.endswith(""".npz""" ):
A_ : Optional[int] = voice_preset + """.npz"""
A_ : Any = np.load(lowercase )
if voice_preset is not None:
self._validate_voice_preset_dict(lowercase , **lowercase )
A_ : Optional[int] = BatchFeature(data=lowercase , tensor_type=lowercase )
A_ : Any = self.tokenizer(
lowercase , return_tensors=lowercase , padding="""max_length""" , max_length=lowercase , return_attention_mask=lowercase , return_token_type_ids=lowercase , add_special_tokens=lowercase , **lowercase , )
if voice_preset is not None:
A_ : Union[str, Any] = voice_preset
return encoded_text | 667 | 0 |
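The rank contract enforced by `_validate_voice_preset_dict` above comes from the class-level `preset_shape` mapping (1, 2, 2); a minimal well-formed preset, with placeholder sequence lengths, looks like this:

import numpy as np

voice_preset = {
    "semantic_prompt": np.zeros(100),      # rank 1
    "coarse_prompt": np.zeros((2, 100)),   # rank 2
    "fine_prompt": np.zeros((8, 100)),     # rank 2
}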
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class lowercase_ (unittest.TestCase ):
def __UpperCamelCase ( self) -> List[str]:
a__ ='ylacombe/bark-small'
a__ =tempfile.mkdtemp()
a__ ='en_speaker_1'
a__ ='This is a test string'
a__ ='speaker_embeddings_path.json'
a__ ='speaker_embeddings'
def __UpperCamelCase ( self , **lowercase_) -> Union[str, Any]:
return AutoTokenizer.from_pretrained(self.checkpoint , **lowercase_)
def __UpperCamelCase ( self) -> str:
shutil.rmtree(self.tmpdirname)
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =self.get_tokenizer()
a__ =BarkProcessor(tokenizer=lowercase_)
processor.save_pretrained(self.tmpdirname)
a__ =BarkProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab())
@slow
def __UpperCamelCase ( self) -> List[str]:
a__ =BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
a__ =self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
a__ =BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
def __UpperCamelCase ( self) -> List[Any]:
a__ =BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
a__ =35
a__ =2
a__ =8
a__ ={
'semantic_prompt': np.ones(lowercase_),
'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len)),
'fine_prompt': np.ones((nb_codebooks_total, seq_len)),
}
# test providing already loaded voice_preset
a__ =processor(text=self.input_string , voice_preset=lowercase_)
a__ =inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([])).tolist())
# test loading voice preset from npz file
a__ =os.path.join(self.tmpdirname , 'file.npz')
np.savez(lowercase_ , **lowercase_)
a__ =processor(text=self.input_string , voice_preset=lowercase_)
a__ =inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([])).tolist())
# test loading voice preset from the hub
a__ =processor(text=self.input_string , voice_preset=self.voice_preset)
def __UpperCamelCase ( self) -> Dict:
a__ =self.get_tokenizer()
a__ =BarkProcessor(tokenizer=lowercase_)
a__ =processor(text=self.input_string)
a__ =tokenizer(
self.input_string , padding='max_length' , max_length=256 , add_special_tokens=lowercase_ , return_attention_mask=lowercase_ , return_token_type_ids=lowercase_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist())
| 20 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : Union[str, Any] = tempfile.mkdtemp()
A_ : List[Any] = BlipImageProcessor()
A_ : Optional[int] = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
A_ : Any = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
A_ : Dict = InstructBlipProcessor(lowercase , lowercase , lowercase )
processor.save_pretrained(self.tmpdirname )
def _a (self , **lowercase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).tokenizer
def _a (self , **lowercase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).image_processor
def _a (self , **lowercase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).qformer_tokenizer
def _a (self ):
shutil.rmtree(self.tmpdirname )
def _a (self ):
A_ : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
A_ : Optional[Any] = [Image.fromarray(np.moveaxis(lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _a (self ):
A_ : str = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
A_ : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A_ : Optional[Any] = self.get_image_processor(do_normalize=lowercase , padding_value=1.0 )
A_ : str = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase )
self.assertIsInstance(processor.qformer_tokenizer , lowercase )
def _a (self ):
A_ : Any = self.get_image_processor()
A_ : Union[str, Any] = self.get_tokenizer()
A_ : List[str] = self.get_qformer_tokenizer()
A_ : int = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : List[Any] = self.prepare_image_inputs()
A_ : Union[str, Any] = image_processor(lowercase , return_tensors="""np""" )
A_ : Dict = processor(images=lowercase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a (self ):
A_ : List[Any] = self.get_image_processor()
A_ : Optional[Any] = self.get_tokenizer()
A_ : Any = self.get_qformer_tokenizer()
A_ : List[str] = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : str = """lower newer"""
A_ : List[Any] = processor(text=lowercase )
A_ : Optional[int] = tokenizer(lowercase , return_token_type_ids=lowercase )
A_ : List[Any] = qformer_tokenizer(lowercase , return_token_type_ids=lowercase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] )
def _a (self ):
A_ : int = self.get_image_processor()
A_ : Union[str, Any] = self.get_tokenizer()
A_ : Union[str, Any] = self.get_qformer_tokenizer()
A_ : Any = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : Optional[int] = """lower newer"""
A_ : Optional[int] = self.prepare_image_inputs()
A_ : Tuple = processor(text=lowercase , images=lowercase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
# test if it raises when no input is passed
with pytest.raises(lowercase ):
processor()
def _a (self ):
A_ : Dict = self.get_image_processor()
A_ : str = self.get_tokenizer()
A_ : Optional[int] = self.get_qformer_tokenizer()
A_ : int = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ : Optional[int] = processor.batch_decode(lowercase )
A_ : Dict = tokenizer.batch_decode(lowercase )
self.assertListEqual(lowercase , lowercase )
def _a (self ):
A_ : Any = self.get_image_processor()
A_ : Dict = self.get_tokenizer()
A_ : Union[str, Any] = self.get_qformer_tokenizer()
A_ : Optional[int] = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : List[Any] = """lower newer"""
A_ : Optional[Any] = self.prepare_image_inputs()
A_ : Any = processor(text=lowercase , images=lowercase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , ) | 667 | 0 |
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence):
    """Sort a sequence containing only the values in `colors` in a single pass."""
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
print(F"""{dutch_national_flag_sort(unsorted)}""")
| 21 |
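A quick demonstration of the single-pass three-way partition above:

example = [2, 0, 1, 0, 2, 1, 1, 0]
print(dutch_national_flag_sort(example))  # [0, 0, 0, 1, 1, 1, 2, 2]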
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range | 667 | 0 |
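Instantiating the configuration above with its defaults, as a quick sanity check:

config = MgpstrConfig()
print(config.model_type)            # mgp-str
print(config.max_token_length)      # 27
print(config.num_character_labels)  # 38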
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case : List[Any] = logging.get_logger(__name__)
def snake_case_ (UpperCamelCase : Dict ):
'''simple docstring'''
if "resnet-50" in model_name:
_a = ResNetConfig.from_pretrained('''microsoft/resnet-50''' )
elif "resnet-101" in model_name:
_a = ResNetConfig.from_pretrained('''microsoft/resnet-101''' )
else:
raise ValueError('''Model name should include either resnet50 or resnet101''' )
_a = DetrConfig(use_timm_backbone=UpperCamelCase , backbone_config=UpperCamelCase )
# set label attributes
_a = '''panoptic''' in model_name
if is_panoptic:
_a = 250
else:
_a = 91
_a = '''huggingface/label-files'''
_a = '''coco-detection-id2label.json'''
_a = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type='''dataset''' ) , '''r''' ) )
_a = {int(UpperCamelCase ): v for k, v in idalabel.items()}
_a = idalabel
_a = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def snake_case_ (UpperCamelCase : Optional[int] ):
'''simple docstring'''
_a = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.conv1.weight''', '''backbone.conv_encoder.model.embedder.embedder.convolution.weight''') )
rename_keys.append(('''backbone.0.body.bn1.weight''', '''backbone.conv_encoder.model.embedder.embedder.normalization.weight''') )
rename_keys.append(('''backbone.0.body.bn1.bias''', '''backbone.conv_encoder.model.embedder.embedder.normalization.bias''') )
rename_keys.append(('''backbone.0.body.bn1.running_mean''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_mean''') )
rename_keys.append(('''backbone.0.body.bn1.running_var''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_var''') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f'transformer.encoder.layers.{i}.self_attn.out_proj.weight',
f'encoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f'transformer.decoder.layers.{i}.self_attn.out_proj.weight',
f'decoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
) )
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
) )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
] )
return rename_keys
def snake_case_ (UpperCamelCase : str , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] ):
'''simple docstring'''
_a = state_dict.pop(UpperCamelCase )
_a = val
def snake_case_ (UpperCamelCase : List[str] , UpperCamelCase : Union[str, Any]=False ):
'''simple docstring'''
_a = ''''''
if is_panoptic:
_a = '''detr.'''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
_a = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
_a = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
_a = in_proj_weight[:256, :]
_a = in_proj_bias[:256]
_a = in_proj_weight[256:512, :]
_a = in_proj_bias[256:512]
_a = in_proj_weight[-256:, :]
_a = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
_a = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
_a = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
_a = in_proj_weight[:256, :]
_a = in_proj_bias[:256]
_a = in_proj_weight[256:512, :]
_a = in_proj_bias[256:512]
_a = in_proj_weight[-256:, :]
_a = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
_a = state_dict.pop(
f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' )
_a = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
_a = in_proj_weight_cross_attn[:256, :]
_a = in_proj_bias_cross_attn[:256]
_a = in_proj_weight_cross_attn[256:512, :]
_a = in_proj_bias_cross_attn[256:512]
_a = in_proj_weight_cross_attn[-256:, :]
_a = in_proj_bias_cross_attn[-256:]
def snake_case_ ():
'''simple docstring'''
_a = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_a = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw )
return im
@torch.no_grad()
def snake_case_ (UpperCamelCase : int , UpperCamelCase : Tuple=None , UpperCamelCase : int=False ):
'''simple docstring'''
_a , _a = get_detr_config(UpperCamelCase )
# load original model from torch hub
_a = {
'''detr-resnet-50''': '''detr_resnet50''',
'''detr-resnet-101''': '''detr_resnet101''',
}
logger.info(f'Converting model {model_name}...' )
_a = torch.hub.load('''facebookresearch/detr''' , model_name_to_original_name[model_name] , pretrained=UpperCamelCase ).eval()
_a = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(UpperCamelCase ):
if is_panoptic:
_a = '''detr.''' + src
rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(UpperCamelCase , is_panoptic=UpperCamelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_a = '''detr.model.''' if is_panoptic else '''model.'''
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('''detr''' )
and not key.startswith('''class_labels_classifier''' )
and not key.startswith('''bbox_predictor''' )
):
_a = state_dict.pop(UpperCamelCase )
_a = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
_a = state_dict.pop(UpperCamelCase )
_a = val
elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ):
continue
else:
_a = state_dict.pop(UpperCamelCase )
_a = val
else:
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
_a = state_dict.pop(UpperCamelCase )
_a = val
# finally, create HuggingFace model and load state dict
_a = DetrForSegmentation(UpperCamelCase ) if is_panoptic else DetrForObjectDetection(UpperCamelCase )
model.load_state_dict(UpperCamelCase )
model.eval()
# verify our conversion on an image
_a = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
_a = DetrImageProcessor(format=UpperCamelCase )
_a = processor(images=prepare_img() , return_tensors='''pt''' )
_a = encoding['''pixel_values''']
_a = detr(UpperCamelCase )
_a = model(UpperCamelCase )
assert torch.allclose(outputs.logits , original_outputs['''pred_logits'''] , atol=1e-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs['''pred_boxes'''] , atol=1e-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['''pred_masks'''] , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
model.save_pretrained(UpperCamelCase )
processor.save_pretrained(UpperCamelCase )
if push_to_hub:
# Upload model and image processor to the hub
logger.info('''Uploading PyTorch model and image processor to the hub...''' )
model.push_to_hub(f'nielsr/{model_name}' )
processor.push_to_hub(f'nielsr/{model_name}' )
if __name__ == "__main__":
_snake_case : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
_snake_case : Optional[int] = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 22 |
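A minimal, self-contained illustration of the slicing performed by `read_in_q_k_v` above: the original model stores query/key/value as one stacked (3·d, d) in_proj matrix (as `nn.MultiheadAttention` does), which the HF model keeps as three separate (d, d) projections.

import torch

d = 256  # DETR's hidden size
in_proj_weight = torch.randn(3 * d, d)  # fused q/k/v weight
q_w = in_proj_weight[:d, :]
k_w = in_proj_weight[d : 2 * d, :]
v_w = in_proj_weight[-d:, :]
assert q_w.shape == k_w.shape == v_w.shape == (d, d)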
'''simple docstring'''
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of `function` with the secant method, starting from x0 and x1."""
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5)) | 667 | 0 |
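The loop above implements the secant update x_{n+2} = x_{n+1} - f(x_{n+1}) * (x_{n+1} - x_n) / (f(x_{n+1}) - f(x_n)); with the default starting points the script prints the real root of x^3 - 2x - 5, roughly 2.0945515. Another quick check on a function with a known root:

print(intersection(lambda x: x**2 - 4, 1.0, 3.2))  # converges to 2.0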
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _a ( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ , UpperCamelCase_ = FlaxStableDiffusionPipeline.from_pretrained(
'stabilityai/stable-diffusion-2' , revision='bf16' , dtype=jnp.bfloataa , )
UpperCamelCase_ = 'A painting of a squirrel eating a burger'
UpperCamelCase_ = jax.device_count()
UpperCamelCase_ = num_samples * [prompt]
UpperCamelCase_ = sd_pipe.prepare_inputs(_UpperCAmelCase )
UpperCamelCase_ = replicate(_UpperCAmelCase )
UpperCamelCase_ = shard(_UpperCAmelCase )
UpperCamelCase_ = jax.random.PRNGKey(0 )
UpperCamelCase_ = jax.random.split(_UpperCAmelCase , jax.device_count() )
UpperCamelCase_ = sd_pipe(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , num_inference_steps=25 , jit=_UpperCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
UpperCamelCase_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
UpperCamelCase_ = images[0, 253:256, 253:256, -1]
UpperCamelCase_ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
UpperCamelCase_ = jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ = 'stabilityai/stable-diffusion-2'
UpperCamelCase_ , UpperCamelCase_ = FlaxDPMSolverMultistepScheduler.from_pretrained(_UpperCAmelCase , subfolder='scheduler' )
UpperCamelCase_ , UpperCamelCase_ = FlaxStableDiffusionPipeline.from_pretrained(
_UpperCAmelCase , scheduler=_UpperCAmelCase , revision='bf16' , dtype=jnp.bfloataa , )
UpperCamelCase_ = scheduler_params
UpperCamelCase_ = 'A painting of a squirrel eating a burger'
UpperCamelCase_ = jax.device_count()
UpperCamelCase_ = num_samples * [prompt]
UpperCamelCase_ = sd_pipe.prepare_inputs(_UpperCAmelCase )
UpperCamelCase_ = replicate(_UpperCAmelCase )
UpperCamelCase_ = shard(_UpperCAmelCase )
UpperCamelCase_ = jax.random.PRNGKey(0 )
UpperCamelCase_ = jax.random.split(_UpperCAmelCase , jax.device_count() )
UpperCamelCase_ = sd_pipe(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , num_inference_steps=25 , jit=_UpperCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
UpperCamelCase_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
UpperCamelCase_ = images[0, 253:256, 253:256, -1]
UpperCamelCase_ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
UpperCamelCase_ = jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 23 |
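The `replicate`/`shard` pair used throughout the tests above is the standard `pmap` data-parallel pattern; a minimal sketch of what `shard` does to a host batch (device count depends on the machine):

import jax
import jax.numpy as jnp
from flax.training.common_utils import shard

n = jax.device_count()
batch = jnp.ones((n * 2, 8))  # leading dim must be divisible by the device count
sharded = shard(batch)        # reshaped to (n, 2, 8): one slice per device
print(sharded.shape)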
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCamelCase :Tuple = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['pixel_values']
def __init__(self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BICUBIC , lowercase = True , lowercase = None , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = None , lowercase = None , lowercase = True , **lowercase , ):
super().__init__(**lowercase )
A_ : Dict = size if size is not None else {"""shortest_edge""": 224}
A_ : List[str] = get_size_dict(lowercase , default_to_square=False )
A_ : Tuple = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
A_ : Union[str, Any] = get_size_dict(lowercase , default_to_square=True , param_name="""crop_size""" )
A_ : str = do_resize
A_ : str = size
A_ : List[str] = resample
A_ : Any = do_center_crop
A_ : Union[str, Any] = crop_size
A_ : List[Any] = do_rescale
A_ : List[Any] = rescale_factor
A_ : Dict = do_normalize
A_ : Optional[int] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A_ : Any = image_std if image_std is not None else OPENAI_CLIP_STD
A_ : Union[str, Any] = do_convert_rgb
def _a (self , lowercase , lowercase , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ):
A_ : Any = get_size_dict(lowercase , default_to_square=lowercase )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A_ : Optional[Any] = get_resize_output_image_size(lowercase , size=size["""shortest_edge"""] , default_to_square=lowercase )
return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ):
A_ : Any = get_size_dict(lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(lowercase , size=(size["""height"""], size["""width"""]) , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ):
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ):
return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ):
A_ : List[str] = do_resize if do_resize is not None else self.do_resize
A_ : int = size if size is not None else self.size
A_ : Optional[int] = get_size_dict(lowercase , param_name="""size""" , default_to_square=lowercase )
A_ : int = resample if resample is not None else self.resample
A_ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
A_ : Any = crop_size if crop_size is not None else self.crop_size
A_ : Dict = get_size_dict(lowercase , param_name="""crop_size""" , default_to_square=lowercase )
A_ : str = do_rescale if do_rescale is not None else self.do_rescale
A_ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
A_ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
A_ : Any = image_mean if image_mean is not None else self.image_mean
A_ : Any = image_std if image_std is not None else self.image_std
A_ : List[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A_ : List[str] = make_list_of_images(lowercase )
if not valid_images(lowercase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A_ : int = [convert_to_rgb(lowercase ) for image in images]
# All transformations expect numpy arrays.
A_ : int = [to_numpy_array(lowercase ) for image in images]
if do_resize:
A_ : int = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images]
if do_center_crop:
A_ : Any = [self.center_crop(image=lowercase , size=lowercase ) for image in images]
if do_rescale:
A_ : List[str] = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
if do_normalize:
A_ : int = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images]
A_ : Any = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
A_ : Dict = {"""pixel_values""": images}
return BatchFeature(data=lowercase , tensor_type=lowercase ) | 667 | 0 |
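# For reference: the upstream (non-obfuscated) counterpart of the processor
# above can be exercised as follows. This is an illustration, not part of the
# original file; it assumes `transformers` and `Pillow` are installed.
import numpy as np
from PIL import Image
from transformers import CLIPImageProcessor

_proc = CLIPImageProcessor(size={"shortest_edge": 224}, crop_size={"height": 224, "width": 224})
_img = Image.fromarray(np.zeros((256, 320, 3), dtype=np.uint8))
print(_proc(images=_img, return_tensors="np")["pixel_values"].shape)  # (1, 3, 224, 224)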
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import work (when the python process is running from the root of the repo)
sys.path.append('''.''')
def get_module_path(test_file):
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.")
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path


def get_test_module(test_file):
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        test_class = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(test_class, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(test_class)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
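# Illustrative only: `to_json` converts the class objects collected by the
# mappings above into plain names so they can be serialized.
assert to_json(int) == "int"
assert to_json({int: [str, "x"]}) == {"int": ["str", "x"]}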
| 24 |
'''simple docstring'''
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
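# A quick illustrative run of the greedy heuristic above (names and numbers
# are made up for demonstration):
_menu = build_menu(["burger", "salad"], [80, 30], [40, 10])
_chosen, _value = greedy(_menu, 50, Things.get_value)
assert _value == 110  # burger (value 80, weight 40) first, then the salad (30, 10) still fits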
def test_greedy():
    '''simple docstring'''


if __name__ == "__main__":
    import doctest

    doctest.testmod() | 667 | 0 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
'text_branch': 'text_model',
'audio_branch': 'audio_model.audio_encoder',
'attn': 'attention.self',
'self.proj': 'output.dense',
'attention.self_mask': 'attn_mask',
'mlp.fc1': 'intermediate.dense',
'mlp.fc2': 'output.dense',
'norm1': 'layernorm_before',
'norm2': 'layernorm_after',
'bn0': 'batch_norm',
}
a_ = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc')
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg


def rename_state_dict(state_dict):
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projection_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projection_layer == 0 else 2
            key = key.replace(f"_projection.{projection_layer}.", f"_projection.linear{transformers_projection_layer}.")
        if "audio" in key and "qkv" in key:
            # split qkv into query, key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict


def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)
    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
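# A minimal sanity check of the key renaming above (illustrative only; runs on
# a fake tensor, no CLAP checkpoint needed): a fused audio qkv key is split
# into separate query/key/value projection keys.
_demo = rename_state_dict({"audio_branch.blocks.0.attn.qkv.weight": torch.zeros(6, 2)})
assert sorted(k.rsplit(".", 2)[-2] for k in _demo) == ["key", "query", "value"]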
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not')
a_ = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion) | 25 |
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)
name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def add_arguments(parser):
    """Add arguments to parser for functions defined in quant_trainer."""
    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
    group.add_argument(
        "--recalibrate-weights",
        action="store_true",
        help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ),
    )


def set_default_quantizers(args):
    """Set default quantizers before creating the model."""
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")
    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)
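# Illustration (assumes pytorch-quantization is installed): the two descriptor
# flavors configured above differ only in `axis` -- per-tensor quantization
# keeps one scale for the whole weight, per-channel keeps one amax per output
# row of the weight matrix.
_per_tensor_desc = QuantDescriptor(num_bits=8, axis=None)
_per_channel_desc = QuantDescriptor(num_bits=8, axis=(0,))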
def configure_model(model, args, calib=False, eval=False):
    """Function called before the training loop."""
    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")
    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)
        if args.clip_gelu:
            clip_gelu(model, args.clip_gelu)
        # if args.local_rank in [-1, 0] and not calib:
        print_quant_summary(model)


def enable_calibration(model):
    """Enable calibration of all *_input_quantizer modules in model."""
    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")


def finish_calibration(model, args):
    """Disable calibration and load amax for all *_input_quantizer modules in model."""
    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model)
def fuse_qkv(model, args):
    """Use the same scale factor for the Q, K and V projections so they can be fused into one GEMM."""
    def fuse3(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()
        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)


def clip_gelu(model, maxval):
    """Clip activations generated by GELU to maxval when quantized."""
    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")


def expand_amax(model):
    """Expand per-tensor amax to be per channel, where each channel is assigned the per-tensor amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")
def recalibrate_weights(model):
    """Performs max calibration on the weights and updates amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod._weight_quantizer, "_amax"):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax
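# The amax reduction above, spelled out with plain torch for a Linear weight
# quantized per output channel (axis 0); illustrative only:
_w = torch.randn(4, 16)                     # (out_features, in_features)
_amax = _w.abs().amax(dim=1, keepdim=True)  # one amax per output row -> shape (4, 1)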
def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Print model quantization configuration."""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]
    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))
    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        s = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f"{' ':{name_width}} {wgt_str}")
def print_quant_summary(model):
    """Print summary of all quantizer modules in the model."""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")


def set_quantizer(name, mod, quantizer, k, v):
    """Set attributes for mod.quantizer."""
    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")


def set_quantizers(name, mod, which="both", **kwargs):
    """Set quantizer attributes for mod."""
    s = f"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name, mod, "_input_quantizer", k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, "_weight_quantizer", k, v)
    logger.info(s)


def set_quantizer_by_name(model, names, **kwargs):
    """Set quantizer attributes for layers where the name contains a substring in names."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s) | 667 | 0 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger()
@dataclass
class _A :
lowercase__: nn.Module
lowercase__: List[nn.Module] = field(default_factory=__lowercase )
lowercase__: list = field(default_factory=__lowercase )
    def _forward_hook(self , m : nn.Module , inputs : Tensor , outputs : Tensor ) -> List[Any]:
        """simple docstring"""
        has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m , nn.Conv2d ) or isinstance(m , nn.BatchNorm2d )
        if has_not_submodules:
            self.traced.append(m )
def __call__( self : Optional[int] , __magic_name__ : Tensor ) -> Any:
"""simple docstring"""
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(__magic_name__ )
[x.remove() for x in self.handles]
return self
@property
def lowercase__ ( self : List[Any] ) -> str:
"""simple docstring"""
return list(filter(lambda __magic_name__ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class _A :
lowercase__: nn.Module
lowercase__: nn.Module
lowercase__: int = 1
lowercase__: List = field(default_factory=__lowercase )
lowercase__: List = field(default_factory=__lowercase )
lowercase__: bool = True
def __call__( self : Dict , __magic_name__ : Tensor ) -> List[str]:
"""simple docstring"""
__snake_case : Any = Tracker(self.dest )(__magic_name__ ).parametrized
__snake_case : Dict = Tracker(self.src )(__magic_name__ ).parametrized
__snake_case : List[str] = list(filter(lambda __magic_name__ : type(__magic_name__ ) not in self.src_skip , __magic_name__ ) )
__snake_case : List[str] = list(filter(lambda __magic_name__ : type(__magic_name__ ) not in self.dest_skip , __magic_name__ ) )
if len(__magic_name__ ) != len(__magic_name__ ) and self.raise_if_mismatch:
raise Exception(
f'''Numbers of operations are different. Source module has {len(__magic_name__ )} operations while'''
f''' destination module has {len(__magic_name__ )}.''' )
for dest_m, src_m in zip(__magic_name__ , __magic_name__ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f'''Transferred from={src_m} to={dest_m}''' )
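# A stripped-down sketch of the Tracker idea used above (illustrative only):
# record every leaf module touched during one forward pass via forward hooks.
def _trace_leaf_modules(module: nn.Module, x: Tensor) -> list:
    seen: list = []
    handles = [
        m.register_forward_hook(lambda mod, inp, out: seen.append(mod))
        for m in module.modules()
        if len(list(m.children())) == 0  # leaves only
    ]
    module(x)
    for h in handles:
        h.remove()
    return seen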
class _A ( nn.Module ):
def __init__( self : Dict , __magic_name__ : nn.Module ) -> Any:
"""simple docstring"""
super().__init__()
__snake_case : List[Tuple[str, nn.Module]] = []
# - get the stem
feature_blocks.append(("""conv1""", model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith("""block""" ), f'''Unexpected layer name {k}'''
__snake_case : Optional[int] = len(__magic_name__ ) + 1
feature_blocks.append((f'''res{block_index}''', v) )
__snake_case : str = nn.ModuleDict(__magic_name__ )
def lowercase__ ( self : int , __magic_name__ : Tensor ) -> Tuple:
"""simple docstring"""
return get_trunk_forward_outputs(
__magic_name__ , out_feat_keys=__magic_name__ , feature_blocks=self._feature_blocks , )
class _A ( __lowercase ):
    def convert_name_to_timm(self , x : str ) -> str:
        """simple docstring"""
        x_split = x.split("""-""" )
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__( self : int , __magic_name__ : str ) -> Callable[[], Tuple[nn.Module, Dict]]:
"""simple docstring"""
if x not in self:
__snake_case : Tuple = self.convert_name_to_timm(__magic_name__ )
__snake_case : List[str] = partial(lambda: (timm.create_model(__magic_name__ , pretrained=__magic_name__ ).eval(), None) )
else:
__snake_case : Tuple = super().__getitem__(__magic_name__ )
return val
class _A ( __lowercase ):
def __getitem__( self : Optional[Any] , __magic_name__ : str ) -> Callable[[], nn.Module]:
"""simple docstring"""
if "seer" in x and "in1k" not in x:
__snake_case : Any = RegNetModel
else:
__snake_case : Tuple = RegNetForImageClassification
return val
def manually_copy_vissl_head(from_state_dict , to_state_dict , keys ) -> Optional[int]:
    """simple docstring"""
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(F'''Copied key={from_key} to={to_key}''' )
    return to_state_dict
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = True , ) -> Union[str, Any]:
"""simple docstring"""
print(F'''Converting {name}...''' )
with torch.no_grad():
__snake_case , __snake_case : str = from_model_func()
__snake_case : Optional[Any] = our_model_func(_lowerCamelCase ).eval()
__snake_case : Dict = ModuleTransfer(src=_lowerCamelCase , dest=_lowerCamelCase , raise_if_mismatch=_lowerCamelCase )
__snake_case : Union[str, Any] = torch.randn((1, 3, 224, 224) )
module_transfer(_lowerCamelCase )
if from_state_dict is not None:
__snake_case : Optional[Any] = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
__snake_case : Any = [("""0.clf.0.weight""", """classifier.1.weight"""), ("""0.clf.0.bias""", """classifier.1.bias""")]
__snake_case : List[str] = manually_copy_vissl_head(_lowerCamelCase , our_model.state_dict() , _lowerCamelCase )
our_model.load_state_dict(_lowerCamelCase )
__snake_case : str = our_model(_lowerCamelCase , output_hidden_states=_lowerCamelCase )
__snake_case : Any = (
our_outputs.logits if isinstance(_lowerCamelCase , _lowerCamelCase ) else our_outputs.last_hidden_state
)
__snake_case : List[str] = from_model(_lowerCamelCase )
__snake_case : List[Any] = from_output[-1] if type(_lowerCamelCase ) is list else from_output
# now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
__snake_case : Union[str, Any] = our_outputs.hidden_states[-1]
assert torch.allclose(_lowerCamelCase , _lowerCamelCase ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="""Add model""" , use_temp_dir=_lowerCamelCase , )
__snake_case : Optional[int] = 224 if """seer""" not in name else 384
# we can use the convnext one
__snake_case : List[Any] = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" , size=_lowerCamelCase )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="""Add image processor""" , use_temp_dir=_lowerCamelCase , )
print(F'''Pushed {name}''' )
def _a ( _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = True ) -> List[str]:
"""simple docstring"""
__snake_case : int = """imagenet-1k-id2label.json"""
__snake_case : int = 1000
__snake_case : Any = (1, num_labels)
__snake_case : Union[str, Any] = """huggingface/label-files"""
__snake_case : List[str] = num_labels
__snake_case : int = json.load(open(cached_download(hf_hub_url(_lowerCamelCase , _lowerCamelCase , repo_type="""dataset""" ) ) , """r""" ) )
__snake_case : Optional[int] = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
__snake_case : Optional[Any] = idalabel
__snake_case : Any = {v: k for k, v in idalabel.items()}
__snake_case : int = partial(_lowerCamelCase , num_labels=_lowerCamelCase , idalabel=_lowerCamelCase , labelaid=_lowerCamelCase )
__snake_case : Optional[Any] = {
"""regnet-x-002""": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type="""x""" ),
"""regnet-x-004""": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type="""x""" ),
"""regnet-x-006""": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type="""x""" ),
"""regnet-x-008""": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type="""x""" ),
"""regnet-x-016""": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type="""x""" ),
"""regnet-x-032""": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type="""x""" ),
"""regnet-x-040""": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type="""x""" ),
"""regnet-x-064""": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type="""x""" ),
"""regnet-x-080""": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type="""x""" ),
"""regnet-x-120""": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type="""x""" ),
"""regnet-x-160""": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type="""x""" ),
"""regnet-x-320""": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type="""x""" ),
# y variant
"""regnet-y-002""": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
"""regnet-y-004""": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
"""regnet-y-006""": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
"""regnet-y-008""": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
"""regnet-y-016""": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
"""regnet-y-032""": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
"""regnet-y-040""": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
"""regnet-y-064""": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
"""regnet-y-080""": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
"""regnet-y-120""": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
"""regnet-y-160""": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
"""regnet-y-320""": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"""regnet-y-320-seer""": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"""regnet-y-640-seer""": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"""regnet-y-1280-seer""": RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"""regnet-y-2560-seer""": RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"""regnet-y-10b-seer""": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
# finetuned on imagenet
"""regnet-y-320-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"""regnet-y-640-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"""regnet-y-1280-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"""regnet-y-2560-seer-in1k""": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"""regnet-y-10b-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
}
__snake_case : List[str] = NameToOurModelFuncMap()
__snake_case : List[str] = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(_lowerCamelCase , _lowerCamelCase ) -> Tuple[nn.Module, Dict]:
__snake_case : List[str] = torch.hub.load_state_dict_from_url(_lowerCamelCase , model_dir=str(_lowerCamelCase ) , map_location="""cpu""" )
__snake_case : Optional[Any] = model_func()
# check if we have a head, if yes add it
__snake_case : str = files["""classy_state_dict"""]["""base_model"""]["""model"""]
__snake_case : Any = model_state_dict["""trunk"""]
model.load_state_dict(_lowerCamelCase )
return model.eval(), model_state_dict["heads"]
# pretrained
__snake_case : List[str] = partial(
_lowerCamelCase , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__snake_case : List[str] = partial(
_lowerCamelCase , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__snake_case : Tuple = partial(
_lowerCamelCase , """https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
__snake_case : str = partial(
_lowerCamelCase , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch""" , lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=27 , group_width=1010 , w_a=1744 , w_0=620.83 , w_m=2.52 ) ) ) , )
# IN1K finetuned
__snake_case : Dict = partial(
_lowerCamelCase , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__snake_case : List[Any] = partial(
_lowerCamelCase , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__snake_case : Union[str, Any] = partial(
_lowerCamelCase , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
__snake_case : str = partial(
_lowerCamelCase , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch""" , lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=27 , group_width=1010 , w_a=1744 , w_0=620.83 , w_m=2.52 ) ) ) , )
if model_name:
convert_weight_and_push(
_lowerCamelCase , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , _lowerCamelCase , _lowerCamelCase , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
_lowerCamelCase , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , )
return config, expected_shape
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
__UpperCamelCase = parser.parse_args()
__UpperCamelCase = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 26 |
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : List[Any] = 0
@slow
def _a (self ):
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(lowercase ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
A_ : Tuple = AutoTokenizer.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(lowercase ) , 0 )
def _a (self ):
A_ : str = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def _a (self ):
A_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def _a (self ):
A_ : int = AutoConfig.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
# Check that tokenizer_type ≠ model_type
A_ : int = AutoTokenizer.from_pretrained(lowercase , config=lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def _a (self ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase , """vocab.txt""" ) )
A_ : Optional[Any] = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""bert""" , use_fast=lowercase )
self.assertIsInstance(lowercase , lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase , """merges.txt""" ) )
A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""gpt2""" , use_fast=lowercase )
self.assertIsInstance(lowercase , lowercase )
@require_tokenizers
def _a (self ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase , """vocab.txt""" ) )
A_ : Any = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""bert""" )
self.assertIsInstance(lowercase , lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase , """merges.txt""" ) )
A_ : int = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""gpt2""" )
self.assertIsInstance(lowercase , lowercase )
def _a (self ):
with pytest.raises(lowercase ):
AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" )
@require_tokenizers
def _a (self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
A_ : str = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
if isinstance(lowercase , lowercase ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , lowercase )
else:
self.assertEqual(tokenizer.do_lower_case , lowercase )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def _a (self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
lowercase , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ):
A_ : int = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" )
def _a (self ):
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
A_ : List[str] = TOKENIZER_MAPPING.values()
A_ : Optional[Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(lowercase )
@require_tokenizers
def _a (self ):
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=lowercase ) , lowercase )
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , lowercase )
@require_tokenizers
def _a (self ):
A_ : str = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=lowercase )
A_ : List[Any] = """Hello, world. How are you?"""
A_ : List[Any] = tokenizer.tokenize(lowercase )
self.assertEqual("""[UNK]""" , tokens[0] )
A_ : Dict = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=lowercase )
A_ : List[Any] = tokenizer.tokenize(lowercase )
self.assertEqual("""[UNK]""" , tokens[0] )
@require_tokenizers
def _a (self ):
A_ : Optional[int] = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" )
self.assertEqual(type(lowercase ) , lowercase )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 30000 )
self.assertEqual(tokenizer.unk_token , """[UNK]""" )
self.assertEqual(tokenizer.padding_side , """right""" )
self.assertEqual(tokenizer.truncation_side , """right""" )
def _a (self ):
A_ : Any = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : Tuple = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def _a (self ):
A_ : Union[str, Any] = AutoTokenizer.from_pretrained("""ctrl""" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(lowercase , lowercase )
def _a (self ):
# Check we can load the tokenizer config of an online model.
A_ : Tuple = get_tokenizer_config("""bert-base-cased""" )
A_ : Any = config.pop("""_commit_hash""" , lowercase )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(lowercase , {"""do_lower_case""": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
A_ : List[Any] = get_tokenizer_config(lowercase )
self.assertDictEqual(lowercase , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
A_ : int = AutoTokenizer.from_pretrained(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : Dict = get_tokenizer_config(lowercase )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" )
def _a (self ):
try:
AutoConfig.register("""custom""" , lowercase )
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase ):
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
A_ : Tuple = CustomTokenizer.from_pretrained(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : List[str] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def _a (self ):
try:
AutoConfig.register("""custom""" , lowercase )
# Can register in two steps
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
lowercase , slow_tokenizer_class=lowercase , fast_tokenizer_class=lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase ):
AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
# We pass through a bert tokenizer fast because there is no slow-to-fast converter for our new tokenizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
A_ : str = BertTokenizerFast.from_pretrained(lowercase )
bert_tokenizer.save_pretrained(lowercase )
A_ : Optional[Any] = CustomTokenizerFast.from_pretrained(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : List[str] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase , use_fast=lowercase )
self.assertIsInstance(lowercase , lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _a (self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase ):
A_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase ):
A_ : Any = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
A_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : int = AutoTokenizer.from_pretrained(lowercase , trust_remote_code=lowercase )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
A_ : str = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : Any = AutoTokenizer.from_pretrained(lowercase , trust_remote_code=lowercase , use_fast=lowercase )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
@require_tokenizers
def _a (self ):
class NewTokenizer(BertTokenizer ):
    special_attribute_present = False
class NewTokenizerFast(BertTokenizerFast ):
    slow_tokenizer_class = NewTokenizer
    special_attribute_present = False
try:
AutoConfig.register("""custom""" , lowercase )
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
# If remote code is not set, the default is to use local
A_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
A_ : int = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
A_ : int = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
A_ : List[Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
A_ : Any = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertTrue(tokenizer.special_attribute_present )
A_ : Union[str, Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _a (self ):
A_ : Dict = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
A_ : Optional[int] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def _a (self ):
with self.assertRaisesRegex(
lowercase , """bert-base is not a local folder and is not a valid model identifier""" ):
A_ : List[str] = AutoTokenizer.from_pretrained("""bert-base""" )
def _a (self ):
with self.assertRaisesRegex(
lowercase , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
A_ : Tuple = AutoTokenizer.from_pretrained(lowercase , revision="""aaaaaa""" )
def _a (self ):
# Make sure we have cached the tokenizer.
A_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
A_ : Tuple = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
        self.assertEqual(counter.other_request_count , 0 )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__A : List[str] = {}
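# Each optional backend contributes its tokenizer class to the import structure only when
# the corresponding dependency (sentencepiece or tokenizers) is installed.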
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__A : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from __future__ import annotations
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if partitions <= 0:
raise ValueError("""partitions must be a positive number!""" )
if partitions > number_of_bytes:
raise ValueError("""partitions can not > number_of_bytes!""" )
A_ : int = number_of_bytes // partitions
A_ : Union[str, Any] = []
for i in range(lowerCamelCase__ ):
A_ : Dict = i * bytes_per_partition + 1
A_ : Tuple = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'{start_bytes}-{end_bytes}' )
return allocation_list
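# Worked example: allocating 16 bytes across 4 partitions yields ['1-4', '5-8', '9-12', '13-16'];
# the final partition absorbs any remainder when number_of_bytes is not evenly divisible.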
if __name__ == "__main__":
import doctest
    doctest.testmod()
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def lowercase__( __UpperCamelCase: Optional[int] ,__UpperCamelCase: Tuple=1 ):
"""simple docstring"""
if n_shave_prefix_segments >= 0:
return ".".join(path.split('.' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('.' )[:n_shave_prefix_segments] )
def lowercase__( __UpperCamelCase: Any ,__UpperCamelCase: Optional[int]=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = []
for old_item in old_list:
SCREAMING_SNAKE_CASE : List[Any] = old_item.replace('in_layers.0' ,'norm1' )
SCREAMING_SNAKE_CASE : Optional[Any] = new_item.replace('in_layers.2' ,'conv1' )
SCREAMING_SNAKE_CASE : Dict = new_item.replace('out_layers.0' ,'norm2' )
SCREAMING_SNAKE_CASE : int = new_item.replace('out_layers.3' ,'conv2' )
SCREAMING_SNAKE_CASE : Tuple = new_item.replace('emb_layers.1' ,'time_emb_proj' )
SCREAMING_SNAKE_CASE : Optional[Any] = new_item.replace('skip_connection' ,'conv_shortcut' )
SCREAMING_SNAKE_CASE : List[Any] = shave_segments(__UpperCamelCase ,n_shave_prefix_segments=__UpperCamelCase )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def lowercase__( __UpperCamelCase: int ,__UpperCamelCase: List[str]=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = []
for old_item in old_list:
SCREAMING_SNAKE_CASE : str = old_item
SCREAMING_SNAKE_CASE : Optional[int] = new_item.replace('norm.weight' ,'group_norm.weight' )
SCREAMING_SNAKE_CASE : Dict = new_item.replace('norm.bias' ,'group_norm.bias' )
SCREAMING_SNAKE_CASE : Dict = new_item.replace('proj_out.weight' ,'proj_attn.weight' )
SCREAMING_SNAKE_CASE : List[str] = new_item.replace('proj_out.bias' ,'proj_attn.bias' )
SCREAMING_SNAKE_CASE : List[Any] = shave_segments(__UpperCamelCase ,n_shave_prefix_segments=__UpperCamelCase )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def lowercase__( __UpperCamelCase: Any ,__UpperCamelCase: Union[str, Any] ,__UpperCamelCase: List[Any] ,__UpperCamelCase: Dict=None ,__UpperCamelCase: Union[str, Any]=None ,__UpperCamelCase: Any=None ):
"""simple docstring"""
assert isinstance(__UpperCamelCase ,__UpperCamelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
SCREAMING_SNAKE_CASE : Optional[int] = old_checkpoint[path]
SCREAMING_SNAKE_CASE : str = old_tensor.shape[0] // 3
SCREAMING_SNAKE_CASE : List[str] = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
SCREAMING_SNAKE_CASE : Any = old_tensor.shape[0] // config['num_head_channels'] // 3
SCREAMING_SNAKE_CASE : Any = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = old_tensor.split(channels // num_heads ,dim=1 )
SCREAMING_SNAKE_CASE : Tuple = query.reshape(__UpperCamelCase )
SCREAMING_SNAKE_CASE : int = key.reshape(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Dict = value.reshape(__UpperCamelCase )
for path in paths:
SCREAMING_SNAKE_CASE : List[str] = path['new']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
SCREAMING_SNAKE_CASE : str = new_path.replace('middle_block.0' ,'mid_block.resnets.0' )
SCREAMING_SNAKE_CASE : int = new_path.replace('middle_block.1' ,'mid_block.attentions.0' )
SCREAMING_SNAKE_CASE : Any = new_path.replace('middle_block.2' ,'mid_block.resnets.1' )
if additional_replacements is not None:
for replacement in additional_replacements:
SCREAMING_SNAKE_CASE : Union[str, Any] = new_path.replace(replacement['old'] ,replacement['new'] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
SCREAMING_SNAKE_CASE : Union[str, Any] = old_checkpoint[path['old']][:, :, 0]
else:
SCREAMING_SNAKE_CASE : Tuple = old_checkpoint[path['old']]
def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
SCREAMING_SNAKE_CASE : int = checkpoint['time_embed.0.weight']
SCREAMING_SNAKE_CASE : Dict = checkpoint['time_embed.0.bias']
SCREAMING_SNAKE_CASE : List[str] = checkpoint['time_embed.2.weight']
SCREAMING_SNAKE_CASE : List[str] = checkpoint['time_embed.2.bias']
SCREAMING_SNAKE_CASE : Optional[Any] = checkpoint['input_blocks.0.0.weight']
SCREAMING_SNAKE_CASE : Dict = checkpoint['input_blocks.0.0.bias']
SCREAMING_SNAKE_CASE : Dict = checkpoint['out.0.weight']
SCREAMING_SNAKE_CASE : str = checkpoint['out.0.bias']
SCREAMING_SNAKE_CASE : str = checkpoint['out.2.weight']
SCREAMING_SNAKE_CASE : Dict = checkpoint['out.2.bias']
# Retrieves the keys for the input blocks only
SCREAMING_SNAKE_CASE : List[str] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} )
SCREAMING_SNAKE_CASE : Any = {
layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
for layer_id in range(__UpperCamelCase )
}
# Retrieves the keys for the middle blocks only
SCREAMING_SNAKE_CASE : Tuple = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} )
SCREAMING_SNAKE_CASE : str = {
layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
for layer_id in range(__UpperCamelCase )
}
# Retrieves the keys for the output blocks only
SCREAMING_SNAKE_CASE : Any = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'output_blocks' in layer} )
SCREAMING_SNAKE_CASE : int = {
layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
for layer_id in range(__UpperCamelCase )
}
for i in range(1 ,__UpperCamelCase ):
SCREAMING_SNAKE_CASE : Tuple = (i - 1) // (config['num_res_blocks'] + 1)
SCREAMING_SNAKE_CASE : List[str] = (i - 1) % (config['num_res_blocks'] + 1)
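        # Each down block holds num_res_blocks resnets followed by a downsampler, so the
        # flat input-block index i maps to a (block_id, layer_in_block_id) pair.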
SCREAMING_SNAKE_CASE : Any = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
SCREAMING_SNAKE_CASE : Optional[int] = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]
if f"input_blocks.{i}.0.op.weight" in checkpoint:
SCREAMING_SNAKE_CASE : List[str] = checkpoint[
f"input_blocks.{i}.0.op.weight"
]
SCREAMING_SNAKE_CASE : Dict = checkpoint[
f"input_blocks.{i}.0.op.bias"
]
continue
SCREAMING_SNAKE_CASE : int = renew_resnet_paths(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Dict = {'old': f"input_blocks.{i}.0", 'new': f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
SCREAMING_SNAKE_CASE : str = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
assign_to_checkpoint(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,additional_replacements=[meta_path, resnet_op] ,config=__UpperCamelCase )
if len(__UpperCamelCase ):
SCREAMING_SNAKE_CASE : Optional[int] = renew_attention_paths(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = {
'old': f"input_blocks.{i}.1",
'new': f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
}
SCREAMING_SNAKE_CASE : str = {
f"input_blocks.{i}.1.qkv.bias": {
'key': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
'query': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
'value': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
f"input_blocks.{i}.1.qkv.weight": {
'key': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
'query': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
'value': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,additional_replacements=[meta_path] ,attention_paths_to_split=__UpperCamelCase ,config=__UpperCamelCase ,)
SCREAMING_SNAKE_CASE : List[str] = middle_blocks[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = middle_blocks[1]
SCREAMING_SNAKE_CASE : List[Any] = middle_blocks[2]
SCREAMING_SNAKE_CASE : Any = renew_resnet_paths(__UpperCamelCase )
assign_to_checkpoint(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,config=__UpperCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = renew_resnet_paths(__UpperCamelCase )
assign_to_checkpoint(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,config=__UpperCamelCase )
SCREAMING_SNAKE_CASE : Tuple = renew_attention_paths(__UpperCamelCase )
SCREAMING_SNAKE_CASE : str = {
'middle_block.1.qkv.bias': {
'key': 'mid_block.attentions.0.key.bias',
'query': 'mid_block.attentions.0.query.bias',
'value': 'mid_block.attentions.0.value.bias',
},
'middle_block.1.qkv.weight': {
'key': 'mid_block.attentions.0.key.weight',
'query': 'mid_block.attentions.0.query.weight',
'value': 'mid_block.attentions.0.value.weight',
},
}
assign_to_checkpoint(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,attention_paths_to_split=__UpperCamelCase ,config=__UpperCamelCase )
for i in range(__UpperCamelCase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = i // (config['num_res_blocks'] + 1)
SCREAMING_SNAKE_CASE : str = i % (config['num_res_blocks'] + 1)
SCREAMING_SNAKE_CASE : Dict = [shave_segments(__UpperCamelCase ,2 ) for name in output_blocks[i]]
SCREAMING_SNAKE_CASE : str = {}
for layer in output_block_layers:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = layer.split('.' )[0], shave_segments(__UpperCamelCase ,1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(__UpperCamelCase )
else:
SCREAMING_SNAKE_CASE : List[str] = [layer_name]
if len(__UpperCamelCase ) > 1:
SCREAMING_SNAKE_CASE : Optional[Any] = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
SCREAMING_SNAKE_CASE : Optional[Any] = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]
SCREAMING_SNAKE_CASE : Dict = renew_resnet_paths(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = renew_resnet_paths(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Any = {'old': f"output_blocks.{i}.0", 'new': f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
assign_to_checkpoint(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,additional_replacements=[meta_path] ,config=__UpperCamelCase )
if ["conv.weight", "conv.bias"] in output_block_list.values():
SCREAMING_SNAKE_CASE : Optional[Any] = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] )
SCREAMING_SNAKE_CASE : Optional[int] = checkpoint[
f"output_blocks.{i}.{index}.conv.weight"
]
SCREAMING_SNAKE_CASE : List[str] = checkpoint[
f"output_blocks.{i}.{index}.conv.bias"
]
            # Clear attentions here, as they have already been assigned above.
if len(__UpperCamelCase ) == 2:
SCREAMING_SNAKE_CASE : List[str] = []
if len(__UpperCamelCase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = renew_attention_paths(__UpperCamelCase )
SCREAMING_SNAKE_CASE : str = {
'old': f"output_blocks.{i}.1",
'new': f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
}
SCREAMING_SNAKE_CASE : int = {
f"output_blocks.{i}.1.qkv.bias": {
'key': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
'query': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
'value': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
f"output_blocks.{i}.1.qkv.weight": {
'key': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
'query': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
'value': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,additional_replacements=[meta_path] ,attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None ,config=__UpperCamelCase ,)
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = renew_resnet_paths(__UpperCamelCase ,n_shave_prefix_segments=1 )
for path in resnet_0_paths:
SCREAMING_SNAKE_CASE : Dict = '.'.join(['output_blocks', str(__UpperCamelCase ), path['old']] )
SCREAMING_SNAKE_CASE : Optional[int] = '.'.join(['up_blocks', str(__UpperCamelCase ), 'resnets', str(__UpperCamelCase ), path['new']] )
SCREAMING_SNAKE_CASE : int = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the architecture.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
UpperCamelCase_ = parser.parse_args()
UpperCamelCase_ = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
UpperCamelCase_ = json.loads(f.read())
UpperCamelCase_ = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
UpperCamelCase_ = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
UpperCamelCase_ = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
UpperCamelCase_ = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))
UpperCamelCase_ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase :Any = logging.get_logger(__name__)
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm1.weight', f'encoder.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm1.bias', f'encoder.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.weight', f'encoder.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.bias', f'encoder.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm2.weight', f'encoder.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm2.bias', f'encoder.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.weight', f'encoder.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.bias', f'encoder.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc2.weight', f'encoder.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.mlp.fc2.bias', f'encoder.encoder.layer.{i}.output.dense.bias') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
A_ : Optional[int] = state_dict.pop(f'encoder.deit.blocks.{i}.attn.qkv.weight' )
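        # in_proj_weight stacks the query, key and value projections as a
        # (3 * hidden_size, hidden_size) matrix; the three slices below split it apart.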
A_ : Union[str, Any] = in_proj_weight[
: encoder_config.hidden_size, :
]
A_ : str = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
A_ : Union[str, Any] = in_proj_weight[
-encoder_config.hidden_size :, :
]
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : str = dct.pop(lowerCamelCase__ )
A_ : Optional[int] = val
def a ( lowerCamelCase__ ):
'''simple docstring'''
if "handwritten" in checkpoint_url:
A_ : Optional[Any] = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg""" # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
A_ : Tuple = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"""
A_ : List[str] = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw ).convert("""RGB""" )
return im
@torch.no_grad()
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Union[str, Any] = ViTConfig(image_size=3_84 , qkv_bias=lowerCamelCase__ )
A_ : int = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
A_ : List[str] = 7_68
elif "large" in checkpoint_url:
# use ViT-large encoder
A_ : Union[str, Any] = 10_24
A_ : List[Any] = 40_96
A_ : Dict = 24
A_ : List[str] = 16
A_ : Union[str, Any] = 10_24
else:
raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" )
    # the large-printed + stage1 checkpoints use sinusoidal position embeddings and no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
A_ : Optional[Any] = False
A_ : Union[str, Any] = """relu"""
A_ : List[str] = 10_24
A_ : Tuple = True
A_ : Tuple = False
A_ : List[str] = False
# load HuggingFace model
A_ : Optional[int] = ViTModel(lowerCamelCase__ , add_pooling_layer=lowerCamelCase__ )
A_ : Dict = TrOCRForCausalLM(lowerCamelCase__ )
A_ : Dict = VisionEncoderDecoderModel(encoder=lowerCamelCase__ , decoder=lowerCamelCase__ )
model.eval()
# load state_dict of original model, rename some keys
A_ : int = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location="""cpu""" , check_hash=lowerCamelCase__ )["""model"""]
A_ : int = create_rename_keys(lowerCamelCase__ , lowerCamelCase__ )
for src, dest in rename_keys:
rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
read_in_q_k_v(lowerCamelCase__ , lowerCamelCase__ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
A_ : Union[str, Any] = state_dict.pop(lowerCamelCase__ )
if key.startswith("""decoder""" ) and "output_projection" not in key:
A_ : str = val
else:
A_ : List[str] = val
# load state dict
model.load_state_dict(lowerCamelCase__ )
# Check outputs on an image
A_ : str = ViTImageProcessor(size=encoder_config.image_size )
A_ : Union[str, Any] = RobertaTokenizer.from_pretrained("""roberta-large""" )
A_ : Tuple = TrOCRProcessor(lowerCamelCase__ , lowerCamelCase__ )
A_ : Dict = processor(images=prepare_img(lowerCamelCase__ ) , return_tensors="""pt""" ).pixel_values
# verify logits
A_ : Optional[Any] = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
A_ : Union[str, Any] = model(pixel_values=lowerCamelCase__ , decoder_input_ids=lowerCamelCase__ )
A_ : Dict = outputs.logits
A_ : str = torch.Size([1, 1, 5_02_65] )
if "trocr-base-handwritten" in checkpoint_url:
A_ : Optional[int] = torch.tensor(
[-1.4_502, -4.6_683, -0.5_347, -2.9_291, 9.1_435, -3.0_571, 8.9_764, 1.7_560, 8.7_358, -1.5_311] )
elif "trocr-large-handwritten" in checkpoint_url:
A_ : Any = torch.tensor(
[-2.6_437, -1.3_129, -2.2_596, -5.3_455, 6.3_539, 1.7_604, 5.4_991, 1.4_702, 5.6_113, 2.0_170] )
elif "trocr-base-printed" in checkpoint_url:
A_ : List[Any] = torch.tensor(
[-5.6_816, -5.8_388, 1.1_398, -6.9_034, 6.8_505, -2.4_393, 1.2_284, -1.0_232, -1.9_661, -3.9_210] )
elif "trocr-large-printed" in checkpoint_url:
A_ : Optional[Any] = torch.tensor(
[-6.0_162, -7.0_959, 4.4_155, -5.1_063, 7.0_468, -3.1_631, 2.6_466, -0.3_081, -0.8_106, -1.7_535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , lowerCamelCase__ , atol=1E-3 ), "First elements of logits not as expected"
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCamelCase__ )
print(f'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
lowerCamelCase :Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
lowerCamelCase :Optional[int] = parser.parse_args()
    convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def lowercase ( lowerCAmelCase__ ):
return ConvertCommand(
args.model_type ,args.tf_checkpoint ,args.pytorch_dump_output ,args.config ,args.finetuning_task_name )
A_ = """
transformers can only be used from the command line to convert TensorFlow models to PyTorch. In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""
class __lowerCamelCase ( lowerCAmelCase ):
@staticmethod
def UpperCAmelCase__ ( UpperCAmelCase ):
lowerCamelCase_ = parser.add_parser(
'''convert''' , help='''CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.''' , )
train_parser.add_argument('''--model_type''' , type=UpperCAmelCase , required=UpperCAmelCase , help='''Model\'s type.''' )
train_parser.add_argument(
'''--tf_checkpoint''' , type=UpperCAmelCase , required=UpperCAmelCase , help='''TensorFlow checkpoint path or folder.''' )
train_parser.add_argument(
'''--pytorch_dump_output''' , type=UpperCAmelCase , required=UpperCAmelCase , help='''Path to the PyTorch saved model output.''' )
train_parser.add_argument('''--config''' , type=UpperCAmelCase , default='''''' , help='''Configuration file path or folder.''' )
train_parser.add_argument(
'''--finetuning_task_name''' , type=UpperCAmelCase , default=UpperCAmelCase , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , )
train_parser.set_defaults(func=UpperCAmelCase )
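        # Example invocation (paths are illustrative):
        #   transformers-cli convert --model_type bert \
        #       --tf_checkpoint ./model.ckpt --config ./bert_config.json \
        #       --pytorch_dump_output ./pytorch_model.bin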
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , *UpperCAmelCase , ):
lowerCamelCase_ = logging.get_logger('''transformers-cli/converting''' )
self._logger.info(f"Loading model {model_type}" )
lowerCamelCase_ = model_type
lowerCamelCase_ = tf_checkpoint
lowerCamelCase_ = pytorch_dump_output
lowerCamelCase_ = config
lowerCamelCase_ = finetuning_task_name
def UpperCAmelCase__ ( self ):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCAmelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCAmelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCAmelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(UpperCAmelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCAmelCase )
if "ckpt" in self._tf_checkpoint.lower():
lowerCamelCase_ = self._tf_checkpoint
lowerCamelCase_ = ''''''
else:
lowerCamelCase_ = self._tf_checkpoint
lowerCamelCase_ = ''''''
convert_transfo_xl_checkpoint_to_pytorch(
UpperCAmelCase , self._config , self._pytorch_dump_output , UpperCAmelCase )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCAmelCase )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCAmelCase )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
                '''--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert, rembert]''' )
'''simple docstring'''
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
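# How it works: the argument string contains "%r" and an escaped "%%"; evaluating
# `quine % quine` substitutes the string's own repr into itself and collapses "%%"
# to "%", so the printed output reproduces the source line above exactly.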
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class __a( unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = ViTImageProcessor if is_vision_available() else None
@property
def a__ ( self ) -> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ) -> List[Any]:
UpperCAmelCase_ : Optional[Any] = (3, 32, 128)
UpperCAmelCase_ : Optional[Any] = tempfile.mkdtemp()
# fmt: off
UpperCAmelCase_ : Any = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
UpperCAmelCase_ : Any = dict(zip(_SCREAMING_SNAKE_CASE ,range(len(_SCREAMING_SNAKE_CASE ) ) ) )
UpperCAmelCase_ : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) + '''\n''' )
UpperCAmelCase_ : List[str] = {
'''do_normalize''': False,
'''do_resize''': True,
'''image_processor_type''': '''ViTImageProcessor''',
'''resample''': 3,
'''size''': {'''height''': 32, '''width''': 128},
}
UpperCAmelCase_ : Union[str, Any] = os.path.join(self.tmpdirname ,_SCREAMING_SNAKE_CASE )
with open(self.image_processor_file ,'''w''' ,encoding='''utf-8''' ) as fp:
json.dump(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ,**_SCREAMING_SNAKE_CASE ) -> Any:
return MgpstrTokenizer.from_pretrained(self.tmpdirname ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ,**_SCREAMING_SNAKE_CASE ) -> str:
return ViTImageProcessor.from_pretrained(self.tmpdirname ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def a__ ( self ) -> List[Any]:
UpperCAmelCase_ : List[str] = np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )
UpperCAmelCase_ : Optional[int] = Image.fromarray(np.moveaxis(_SCREAMING_SNAKE_CASE ,0 ,-1 ) )
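        # moveaxis turns the random (C, H, W) array into (H, W, C), the layout PIL expects.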
return image_input
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Tuple = self.get_tokenizer()
UpperCAmelCase_ : Tuple = self.get_image_processor()
UpperCAmelCase_ : Tuple = MgpstrProcessor(tokenizer=_SCREAMING_SNAKE_CASE ,image_processor=_SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : Union[str, Any] = MgpstrProcessor.from_pretrained(self.tmpdirname ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(processor.char_tokenizer.get_vocab() ,tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer ,_SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Any:
UpperCAmelCase_ : int = self.get_tokenizer()
UpperCAmelCase_ : Optional[Any] = self.get_image_processor()
UpperCAmelCase_ : str = MgpstrProcessor(tokenizer=_SCREAMING_SNAKE_CASE ,image_processor=_SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer(bos_token='''(BOS)''' ,eos_token='''(EOS)''' )
UpperCAmelCase_ : List[Any] = self.get_image_processor(do_normalize=_SCREAMING_SNAKE_CASE ,padding_value=1.0 )
UpperCAmelCase_ : int = MgpstrProcessor.from_pretrained(
self.tmpdirname ,bos_token='''(BOS)''' ,eos_token='''(EOS)''' ,do_normalize=_SCREAMING_SNAKE_CASE ,padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer ,_SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Dict:
UpperCAmelCase_ : Tuple = self.get_image_processor()
UpperCAmelCase_ : List[str] = self.get_tokenizer()
UpperCAmelCase_ : List[Any] = MgpstrProcessor(tokenizer=_SCREAMING_SNAKE_CASE ,image_processor=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = self.prepare_image_inputs()
UpperCAmelCase_ : Dict = image_processor(_SCREAMING_SNAKE_CASE ,return_tensors='''np''' )
UpperCAmelCase_ : Dict = processor(images=_SCREAMING_SNAKE_CASE ,return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
def a__ ( self ) -> List[Any]:
UpperCAmelCase_ : str = self.get_image_processor()
UpperCAmelCase_ : Dict = self.get_tokenizer()
UpperCAmelCase_ : Dict = MgpstrProcessor(tokenizer=_SCREAMING_SNAKE_CASE ,image_processor=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = '''test'''
UpperCAmelCase_ : Optional[Any] = processor(text=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = tokenizer(_SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : int = self.get_image_processor()
UpperCAmelCase_ : List[str] = self.get_tokenizer()
UpperCAmelCase_ : Optional[int] = MgpstrProcessor(tokenizer=_SCREAMING_SNAKE_CASE ,image_processor=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = '''test'''
UpperCAmelCase_ : List[Any] = self.prepare_image_inputs()
UpperCAmelCase_ : List[str] = processor(text=_SCREAMING_SNAKE_CASE ,images=_SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) ,['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(_SCREAMING_SNAKE_CASE ):
processor()
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Optional[Any] = self.get_image_processor()
UpperCAmelCase_ : List[str] = self.get_tokenizer()
UpperCAmelCase_ : Dict = MgpstrProcessor(tokenizer=_SCREAMING_SNAKE_CASE ,image_processor=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase_ : List[str] = processor.char_decode(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = [seq.replace(''' ''' ,'''''' ) for seq in decoded_tok]
self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = self.get_image_processor()
UpperCAmelCase_ : Tuple = self.get_tokenizer()
UpperCAmelCase_ : str = MgpstrProcessor(tokenizer=_SCREAMING_SNAKE_CASE ,image_processor=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Union[str, Any] = self.prepare_image_inputs()
UpperCAmelCase_ : Optional[int] = processor(text=_SCREAMING_SNAKE_CASE ,images=_SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = self.get_image_processor()
UpperCAmelCase_ : str = self.get_tokenizer()
UpperCAmelCase_ : List[Any] = MgpstrProcessor(tokenizer=_SCREAMING_SNAKE_CASE ,image_processor=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = torch.randn(1 ,27 ,38 )
UpperCAmelCase_ : Any = torch.randn(1 ,27 ,50_257 )
UpperCAmelCase_ : List[Any] = torch.randn(1 ,27 ,30_522 )
UpperCAmelCase_ : List[str] = processor.batch_decode([char_input, bpe_input, wp_input] )
        self.assertListEqual(list(results.keys() ) ,['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
lowerCamelCase :List[str] = imread(R'''digital_image_processing/image_data/lena_small.jpg''')
lowerCamelCase :Optional[int] = cvtColor(img, COLOR_BGR2GRAY)
def a ( ):
'''simple docstring'''
A_ : List[Any] = cn.convert_to_negative(lowerCamelCase__ )
# assert negative_img array for at least one True
assert negative_img.any()
def a ( ):
'''simple docstring'''
with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img:
        # The result is a PIL Image, so assert on the prefix of its repr string
assert str(cc.change_contrast(lowerCamelCase__ , 1_10 ) ).startswith(
"""<PIL.Image.Image image mode=RGB size=100x100 at""" )
def a ( ):
'''simple docstring'''
A_ : int = canny.gen_gaussian_kernel(9 , sigma=1.4 )
    # Assert every entry of the generated Gaussian kernel is nonzero
assert resp.all()
def a ( ):
'''simple docstring'''
A_ : int = imread("""digital_image_processing/image_data/lena_small.jpg""" , 0 )
    # assert the grayscale image loaded with every pixel nonzero
assert canny_img.all()
A_ : List[Any] = canny.canny(lowerCamelCase__ )
# assert canny array for at least one True
assert canny_array.any()
def a ( ):
'''simple docstring'''
assert gg.gaussian_filter(lowerCamelCase__ , 5 , sigma=0.9 ).all()
def a ( ):
'''simple docstring'''
A_ : int = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
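    # 3x3 edge-detection style kernel whose weights sum to zero, used to exercise img_convolve.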
A_ : Optional[Any] = conv.img_convolve(lowerCamelCase__ , lowerCamelCase__ ).astype(lowerCamelCase__ )
assert res.any()
def a ( ):
'''simple docstring'''
assert med.median_filter(lowerCamelCase__ , 3 ).any()
def a ( ):
'''simple docstring'''
A_, A_ : int = sob.sobel_filter(lowerCamelCase__ )
assert grad.any() and theta.any()
def a ( ):
'''simple docstring'''
A_ : int = sp.make_sepia(lowerCamelCase__ , 20 )
assert sepia.all()
def a ( lowerCamelCase__ = "digital_image_processing/image_data/lena_small.jpg" ):
'''simple docstring'''
A_ : Any = bs.Burkes(imread(lowerCamelCase__ , 1 ) , 1_20 )
burkes.process()
assert burkes.output_img.any()
def a ( lowerCamelCase__ = "digital_image_processing/image_data/lena_small.jpg" , ):
'''simple docstring'''
A_ : Union[str, Any] = rs.NearestNeighbour(imread(lowerCamelCase__ , 1 ) , 4_00 , 2_00 )
nn.process()
assert nn.output.any()
def a ( ):
'''simple docstring'''
A_ : int = """digital_image_processing/image_data/lena.jpg"""
# Reading the image and converting it to grayscale.
A_ : Union[str, Any] = imread(lowerCamelCase__ , 0 )
# Test for get_neighbors_pixel function() return not None
A_ : str = 0
A_ : str = 0
A_ : Dict = image[x_coordinate][y_coordinate]
A_ : Optional[Any] = lbp.get_neighbors_pixel(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
A_ : str = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
A_ : Any = lbp.local_binary_value(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
    assert lbp_image.any()
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
lowerCamelCase__ : Union[str, Any] = {
'sail/poolformer_s12': 'https://huggingface.co/sail/poolformer_s12/resolve/main/config.json',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "poolformer"
def __init__( self : List[Any] , _lowerCAmelCase : Optional[int]=3 , _lowerCAmelCase : int=16 , _lowerCAmelCase : Union[str, Any]=16 , _lowerCAmelCase : Any=3 , _lowerCAmelCase : str=4.0 , _lowerCAmelCase : List[Any]=[2, 2, 6, 2] , _lowerCAmelCase : Optional[Any]=[64, 128, 320, 512] , _lowerCAmelCase : Optional[Any]=[7, 3, 3, 3] , _lowerCAmelCase : Union[str, Any]=[4, 2, 2, 2] , _lowerCAmelCase : Optional[int]=[2, 1, 1, 1] , _lowerCAmelCase : List[str]=4 , _lowerCAmelCase : Tuple=0.0 , _lowerCAmelCase : str="gelu" , _lowerCAmelCase : str=True , _lowerCAmelCase : Dict=1E-5 , _lowerCAmelCase : Optional[Any]=0.02 , **_lowerCAmelCase : Optional[Any] , ):
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = patch_size
SCREAMING_SNAKE_CASE_ = stride
SCREAMING_SNAKE_CASE_ = padding
SCREAMING_SNAKE_CASE_ = pool_size
SCREAMING_SNAKE_CASE_ = hidden_sizes
SCREAMING_SNAKE_CASE_ = mlp_ratio
SCREAMING_SNAKE_CASE_ = depths
SCREAMING_SNAKE_CASE_ = patch_sizes
SCREAMING_SNAKE_CASE_ = strides
SCREAMING_SNAKE_CASE_ = num_encoder_blocks
SCREAMING_SNAKE_CASE_ = drop_path_rate
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = use_layer_scale
SCREAMING_SNAKE_CASE_ = layer_scale_init_value
SCREAMING_SNAKE_CASE_ = initializer_range
super().__init__(**_lowerCAmelCase )
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = version.parse("1.11" )
@property
def lowerCAmelCase_ ( self : str ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowerCAmelCase_ ( self : List[Any] ):
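        # Absolute tolerance used when validating ONNX export outputs against the PyTorch
        # reference model (the OnnxConfig.atol_for_validation override).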
        return 2E-3
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
lowerCamelCase :Dict = get_logger(__name__)
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase=None ):
A_ : Optional[int] = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith("""__""" ):
setattr(self , lowercase , getattr(lowercase , lowercase ) )
A_ : List[Any] = module._original_module if isinstance(lowercase , _PatchedModuleObj ) else module
class _lowerCAmelCase :
__SCREAMING_SNAKE_CASE : Dict = []
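    # Usage sketch (names illustrative): as a context manager the patcher temporarily
    # rebinds an attribute path such as "os.path.join" as seen from a target object:
    #
    #     with patcher(some_module, "os.path.join", custom_join):
    #         ...  # lookups of os.path.join through some_module resolve to custom_join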
def __init__(self , lowercase , lowercase , lowercase , lowercase=None ):
A_ : Union[str, Any] = obj
A_ : Optional[int] = target
A_ : Optional[Any] = new
A_ : Optional[Any] = target.split(""".""" )[0]
A_ : Tuple = {}
A_ : Optional[int] = attrs or []
def __enter__(self ):
*A_, A_ : Optional[Any] = self.target.split(""".""" )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(lowercase ) ):
try:
A_ : Any = import_module(""".""".join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
A_ : int = getattr(self.obj , lowercase )
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(lowercase , _PatchedModuleObj ) and obj_attr._original_module is submodule)
):
A_ : str = obj_attr
# patch at top level
setattr(self.obj , lowercase , _PatchedModuleObj(lowercase , attrs=self.attrs ) )
A_ : Optional[Any] = getattr(self.obj , lowercase )
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(lowercase , lowercase , _PatchedModuleObj(getattr(lowercase , lowercase , lowercase ) , attrs=self.attrs ) )
A_ : Dict = getattr(lowercase , lowercase )
# finally set the target attribute
setattr(lowercase , lowercase , self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
A_ : Optional[Any] = getattr(import_module(""".""".join(lowercase ) ) , lowercase )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , lowercase ) is attr_value:
A_ : Dict = getattr(self.obj , lowercase )
setattr(self.obj , lowercase , self.new )
        elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
A_ : int = globals()["""__builtins__"""][target_attr]
setattr(self.obj , lowercase , self.new )
else:
raise RuntimeError(F'Tried to patch attribute {target_attr} instead of a submodule.' )
def __exit__(self , *lowercase ):
for attr in list(self.original ):
setattr(self.obj , lowercase , self.original.pop(lowercase ) )
def _a (self ):
self.__enter__()
self._active_patches.append(self )
def _a (self ):
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
        return self.__exit__()
import warnings
from functools import wraps
from typing import Callable
def A__ ( SCREAMING_SNAKE_CASE_ : Callable ) -> Callable:
"""simple docstring"""
@wraps(SCREAMING_SNAKE_CASE_ )
def _inner_fn(*SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : int ):
warnings.warn(
(F'''\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.''') , SCREAMING_SNAKE_CASE_ , )
return fn(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
    return _inner_fn
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCamelCase :int = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Optional[int] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :List[Any] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Any = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
lowerCamelCase :Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
lowerCamelCase__ : Any = """\
"""
lowerCamelCase__ : List[str] = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
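For a tokenized sequence X = (x_1, ..., x_t), PPL(X) = exp( -(1/t) * sum_{i=1}^{t} log p(x_i | x_{<i}) ).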
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
lowerCamelCase__ : Any = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __magic_name__ (datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:int ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''input_texts''': datasets.Value('''string''' ),
} ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:int , _a:List[Any] , _a:int = 16 , _a:bool = True , _a:Any=None ):
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
snake_case__ = '''cuda'''
else:
snake_case__ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
snake_case__ = AutoModelForCausalLM.from_pretrained(_a )
snake_case__ = model.to(_a )
snake_case__ = AutoTokenizer.from_pretrained(_a )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
snake_case__ = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(_a ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
snake_case__ = model.config.max_length - 1
else:
snake_case__ = model.config.max_length
snake_case__ = tokenizer(
_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , return_tensors='''pt''' , return_attention_mask=_a , ).to(_a )
snake_case__ = encodings['''input_ids''']
snake_case__ = encodings['''attention_mask''']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
snake_case__ = []
snake_case__ = CrossEntropyLoss(reduction='''none''' )
for start_index in logging.tqdm(range(0 , len(_a ) , _a ) ):
snake_case__ = min(start_index + batch_size , len(_a ) )
snake_case__ = encoded_texts[start_index:end_index]
snake_case__ = attn_masks[start_index:end_index]
if add_start_token:
snake_case__ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(_a )
snake_case__ = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
snake_case__ = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.int64 ).to(_a ), attn_mask] , dim=1 )
snake_case__ = encoded_batch
with torch.no_grad():
snake_case__ = model(_a , attention_mask=_a ).logits
snake_case__ = out_logits[..., :-1, :].contiguous()
snake_case__ = labels[..., 1:].contiguous()
snake_case__ = attn_mask[..., 1:].contiguous()
snake_case__ = torch.exp(
(loss_fct(shift_logits.transpose(1 , 2 ) , _a ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(_a )}
| 33 |
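For reference, a minimal, self-contained sketch of the per-token perplexity computation the metric above performs, without batching or padding; the model name is purely illustrative.

import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained("gpt2").eval()
tokenizer = AutoTokenizer.from_pretrained("gpt2")

enc = tokenizer("lorem ipsum dolor sit amet", return_tensors="pt")
with torch.no_grad():
    logits = model(enc.input_ids).logits

# Predict token t+1 from positions <= t: drop the last logit and the first label.
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = enc.input_ids[..., 1:].contiguous()

# CrossEntropyLoss expects (batch, vocab, seq), hence the transpose.
nll = CrossEntropyLoss(reduction="none")(shift_logits.transpose(1, 2), shift_labels)
print(torch.exp(nll.mean(dim=1)))  # one perplexity value per input sequence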
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , lowercase , lowercase , lowercase , lowercase = None , ):
super().__init__()
self.register_modules(transformer=lowercase , vae=lowercase , scheduler=lowercase )
# create a imagenet -> id dictionary for easier use
A_ : str = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(""",""" ):
A_ : Optional[Any] = int(lowercase )
A_ : List[Any] = dict(sorted(self.labels.items() ) )
def _a (self , lowercase ):
if not isinstance(lowercase , lowercase ):
A_ : Optional[int] = list(lowercase )
for l in label:
if l not in self.labels:
raise ValueError(
F'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.' )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__(self , lowercase , lowercase = 4.0 , lowercase = None , lowercase = 50 , lowercase = "pil" , lowercase = True , ):
A_ : Tuple = len(lowercase )
A_ : Optional[Any] = self.transformer.config.sample_size
A_ : int = self.transformer.config.in_channels
A_ : Optional[int] = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowercase , device=self.device , dtype=self.transformer.dtype , )
A_ : Optional[Any] = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
A_ : Optional[int] = torch.tensor(lowercase , device=self.device ).reshape(-1 )
A_ : Optional[int] = torch.tensor([1000] * batch_size , device=self.device )
A_ : Optional[Any] = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(lowercase )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
A_ : List[Any] = latent_model_input[: len(lowercase ) // 2]
A_ : List[str] = torch.cat([half, half] , dim=0 )
A_ : Any = self.scheduler.scale_model_input(lowercase , lowercase )
A_ : Tuple = t
if not torch.is_tensor(lowercase ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
A_ : Optional[Any] = latent_model_input.device.type == """mps"""
if isinstance(lowercase , lowercase ):
A_ : Optional[Any] = torch.float32 if is_mps else torch.float64
else:
A_ : List[Any] = torch.int32 if is_mps else torch.int64
A_ : List[Any] = torch.tensor([timesteps] , dtype=lowercase , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
A_ : List[Any] = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
A_ : int = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
A_ : List[Any] = self.transformer(
lowercase , timestep=lowercase , class_labels=lowercase ).sample
# perform guidance
if guidance_scale > 1:
A_, A_ : Any = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
A_, A_ : List[Any] = torch.split(lowercase , len(lowercase ) // 2 , dim=0 )
A_ : Optional[Any] = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
A_ : str = torch.cat([half_eps, half_eps] , dim=0 )
A_ : Optional[int] = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
A_, A_ : int = torch.split(lowercase , lowercase , dim=1 )
else:
A_ : Optional[int] = noise_pred
# compute previous image: x_t -> x_t-1
A_ : Union[str, Any] = self.scheduler.step(lowercase , lowercase , lowercase ).prev_sample
if guidance_scale > 1:
A_, A_ : int = latent_model_input.chunk(2 , dim=0 )
else:
A_ : Union[str, Any] = latent_model_input
A_ : Union[str, Any] = 1 / self.vae.config.scaling_factor * latents
A_ : List[Any] = self.vae.decode(lowercase ).sample
A_ : List[str] = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A_ : Union[str, Any] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
A_ : int = self.numpy_to_pil(lowercase )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=lowercase ) | 667 | 0 |
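The guidance step inside the denoising loop above is the standard classifier-free guidance combination. A minimal sketch of just that step; the tensor shapes are illustrative, and the conditional half is assumed to come first, matching the split above.

import torch

def apply_cfg(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # The batch stacks [conditional, unconditional] predictions along dim 0.
    cond_eps, uncond_eps = torch.split(noise_pred, len(noise_pred) // 2, dim=0)
    # Move past the conditional prediction, away from the unconditional one.
    half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
    return torch.cat([half_eps, half_eps], dim=0)

guided = apply_cfg(torch.randn(4, 4, 32, 32), guidance_scale=4.0)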
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
SCREAMING_SNAKE_CASE_ = {
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 128,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 50,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 10,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 10,
'exponential_decay_length_penalty': (5, 1.01),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def UpperCAmelCase__ ( cls) -> Dict:
UpperCamelCase = TOKEN
HfFolder.save_token(lowerCamelCase_)
@classmethod
def UpperCAmelCase__ ( cls) -> Union[str, Any]:
try:
delete_repo(token=cls._token , repo_id='''test-config''')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-config''')
except HTTPError:
pass
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7)
config.push_to_hub('''test-config''' , use_auth_token=self._token)
UpperCamelCase = BertConfig.from_pretrained(F'{USER}/test-config')
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_ , getattr(lowerCamelCase_ , lowerCamelCase_))
# Reset repo
delete_repo(token=self._token , repo_id='''test-config''')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase_ , repo_id='''test-config''' , push_to_hub=lowerCamelCase_ , use_auth_token=self._token)
UpperCamelCase = BertConfig.from_pretrained(F'{USER}/test-config')
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_ , getattr(lowerCamelCase_ , lowerCamelCase_))
def UpperCAmelCase__ ( self) -> List[str]:
UpperCamelCase = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7)
config.push_to_hub('''valid_org/test-config-org''' , use_auth_token=self._token)
UpperCamelCase = BertConfig.from_pretrained('''valid_org/test-config-org''')
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_ , getattr(lowerCamelCase_ , lowerCamelCase_))
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-config-org''')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase_ , repo_id='''valid_org/test-config-org''' , push_to_hub=lowerCamelCase_ , use_auth_token=self._token)
UpperCamelCase = BertConfig.from_pretrained('''valid_org/test-config-org''')
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_ , getattr(lowerCamelCase_ , lowerCamelCase_))
def UpperCAmelCase__ ( self) -> Tuple:
CustomConfig.register_for_auto_class()
UpperCamelCase = CustomConfig(attribute=4_2)
config.push_to_hub('''test-dynamic-config''' , use_auth_token=self._token)
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'''AutoConfig''': '''custom_configuration.CustomConfig'''})
UpperCamelCase = AutoConfig.from_pretrained(F'{USER}/test-dynamic-config' , trust_remote_code=lowerCamelCase_)
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , '''CustomConfig''')
self.assertEqual(new_config.attribute , 4_2)
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Dict:
UpperCamelCase = GPT2Config()
# attempt to modify each of int/float/bool/str config records and verify they were updated
UpperCamelCase = c.n_embd + 1 # int
UpperCamelCase = c.resid_pdrop + 1.0 # float
UpperCamelCase = not c.scale_attn_weights # bool
UpperCamelCase = c.summary_type + '''foo''' # str
c.update_from_string(
F'n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}')
self.assertEqual(lowerCamelCase_ , c.n_embd , '''mismatch for key: n_embd''')
self.assertEqual(lowerCamelCase_ , c.resid_pdrop , '''mismatch for key: resid_pdrop''')
self.assertEqual(lowerCamelCase_ , c.scale_attn_weights , '''mismatch for key: scale_attn_weights''')
self.assertEqual(lowerCamelCase_ , c.summary_type , '''mismatch for key: summary_type''')
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = PretrainedConfig()
UpperCamelCase = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
lowerCamelCase_ , ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''])
UpperCamelCase = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase_ , lowerCamelCase_)]
if len(lowerCamelCase_) > 0:
raise ValueError(
'''The following keys are set with the default values in'''
''' `test_configuration_common.config_common_kwargs` pick another value for them:'''
F' {", ".join(lowerCamelCase_)}.')
def UpperCAmelCase__ ( self) -> str:
with self.assertRaises(lowerCamelCase_):
# config is in subfolder, the following should not work without specifying the subfolder
UpperCamelCase = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''')
UpperCamelCase = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' , subfolder='''bert''')
self.assertIsNotNone(lowerCamelCase_)
def UpperCAmelCase__ ( self) -> Union[str, Any]:
# A mock response for an HTTP head request to emulate server down
UpperCamelCase = mock.Mock()
UpperCamelCase = 5_0_0
UpperCamelCase = {}
UpperCamelCase = HTTPError
UpperCamelCase = {}
# Download this model to make sure it's in the cache.
UpperCamelCase = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''')
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=lowerCamelCase_) as mock_head:
UpperCamelCase = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''')
# This check we did call the fake head request
mock_head.assert_called()
def UpperCAmelCase__ ( self) -> Optional[int]:
# This test is for deprecated behavior and can be removed in v5
UpperCamelCase = BertConfig.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''')
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = AutoConfig.from_pretrained('''bert-base-cased''')
UpperCamelCase = ['''config.4.0.0.json''']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCamelCase_)
UpperCamelCase = 2
json.dump(configuration.to_dict() , open(os.path.join(lowerCamelCase_ , '''config.4.0.0.json''') , '''w'''))
# This should pick the new configuration file as the version of Transformers is > 4.0.0
UpperCamelCase = AutoConfig.from_pretrained(lowerCamelCase_)
self.assertEqual(new_configuration.hidden_size , 2)
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
UpperCamelCase = ['''config.42.0.0.json''']
UpperCamelCase = 7_6_8
configuration.save_pretrained(lowerCamelCase_)
shutil.move(os.path.join(lowerCamelCase_ , '''config.4.0.0.json''') , os.path.join(lowerCamelCase_ , '''config.42.0.0.json'''))
UpperCamelCase = AutoConfig.from_pretrained(lowerCamelCase_)
self.assertEqual(new_configuration.hidden_size , 7_6_8)
def UpperCAmelCase__ ( self) -> Optional[Any]:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
UpperCamelCase = '''hf-internal-testing/test-two-configs'''
import transformers as new_transformers
UpperCamelCase = '''v4.0.0'''
UpperCamelCase , UpperCamelCase = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCamelCase_ , return_unused_kwargs=lowerCamelCase_)
self.assertEqual(new_configuration.hidden_size , 2)
# This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCamelCase_ , {})
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
UpperCamelCase = '''v3.0.0'''
UpperCamelCase = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase_)
self.assertEqual(old_configuration.hidden_size , 7_6_8) | 34 |
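The `update_from_string` round trip exercised in the test above can be seen in isolation. A short sketch using real GPT-2 config fields; the chosen values are arbitrary.

from transformers import GPT2Config

c = GPT2Config()
c.update_from_string("n_embd=769,resid_pdrop=0.2,scale_attn_weights=false,summary_type=foo")
# Values are coerced to the type of the existing attribute, so "false" -> False.
assert c.n_embd == 769 and c.resid_pdrop == 0.2
assert c.scale_attn_weights is False and c.summary_type == "foo"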
'''simple docstring'''
import math
lowerCamelCase :int = 1_0
lowerCamelCase :List[Any] = 7
lowerCamelCase :Union[str, Any] = BALLS_PER_COLOUR * NUM_COLOURS
def a ( lowerCamelCase__ = 20 ):
'''simple docstring'''
A_ : Dict = math.comb(lowerCamelCase__ , lowerCamelCase__ )
A_ : Optional[Any] = math.comb(NUM_BALLS - BALLS_PER_COLOUR , lowerCamelCase__ )
A_ : List[str] = NUM_COLOURS * (1 - missing_colour / total)
return f'{result:.9f}'
if __name__ == "__main__":
print(solution(2_0)) | 667 | 0 |
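A quick numeric check of the closed form computed above: with 7 colours of 10 balls each and 20 balls drawn, the expected number of distinct colours is 7 * (1 - C(60, 20) / C(70, 20)), since C(60, 20) / C(70, 20) is the probability that one fixed colour is entirely missed.

import math

total = math.comb(70, 20)
missing_one_colour = math.comb(60, 20)
expected = 7 * (1 - missing_one_colour / total)
print(f"{expected:.9f}")  # 6.818741802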
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class lowercase :
def __init__( self : Any , _lowercase : List[Any] , _lowercase : Optional[Any]=99 , _lowercase : Optional[int]=13 , _lowercase : Tuple=16 , _lowercase : Union[str, Any]=7 , _lowercase : Optional[Any]=True , _lowercase : int=True , _lowercase : Optional[Any]=True , _lowercase : str=False , _lowercase : Union[str, Any]=True , _lowercase : Tuple=2 , _lowercase : Any=32 , _lowercase : int=4 , _lowercase : Dict=4 , _lowercase : Dict=30 , _lowercase : Union[str, Any]=0 , _lowercase : List[str]=1 , _lowercase : Optional[Any]=2 , _lowercase : Tuple=None , ):
SCREAMING_SNAKE_CASE__ : Any = parent
SCREAMING_SNAKE_CASE__ : List[Any] = batch_size
SCREAMING_SNAKE_CASE__ : List[str] = decoder_seq_length
# For common tests
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.decoder_seq_length
SCREAMING_SNAKE_CASE__ : Optional[int] = is_training
SCREAMING_SNAKE_CASE__ : Tuple = use_attention_mask
SCREAMING_SNAKE_CASE__ : Any = use_labels
SCREAMING_SNAKE_CASE__ : Any = vocab_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = d_model
SCREAMING_SNAKE_CASE__ : Tuple = d_model
SCREAMING_SNAKE_CASE__ : Optional[int] = decoder_layers
SCREAMING_SNAKE_CASE__ : List[str] = decoder_layers
SCREAMING_SNAKE_CASE__ : Optional[Any] = decoder_ffn_dim
SCREAMING_SNAKE_CASE__ : List[Any] = decoder_attention_heads
SCREAMING_SNAKE_CASE__ : Optional[int] = decoder_attention_heads
SCREAMING_SNAKE_CASE__ : str = eos_token_id
SCREAMING_SNAKE_CASE__ : List[Any] = bos_token_id
SCREAMING_SNAKE_CASE__ : str = pad_token_id
SCREAMING_SNAKE_CASE__ : str = decoder_start_token_id
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_cache
SCREAMING_SNAKE_CASE__ : Optional[int] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Tuple = None
SCREAMING_SNAKE_CASE__ : int = decoder_seq_length
SCREAMING_SNAKE_CASE__ : Optional[int] = 2
SCREAMING_SNAKE_CASE__ : Tuple = 1
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
if self.use_attention_mask:
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[int] = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def lowercase__ ( self : Dict , _lowercase : Any , _lowercase : Dict , _lowercase : Optional[Any] , _lowercase : Optional[Any] , ):
SCREAMING_SNAKE_CASE__ : Dict = True
SCREAMING_SNAKE_CASE__ : Optional[int] = TrOCRDecoder(config=_lowercase ).to(_lowercase ).eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(_lowercase , use_cache=_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = model(_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = model(_lowercase , use_cache=_lowercase )
self.parent.assertTrue(len(_lowercase ) == len(_lowercase ) )
self.parent.assertTrue(len(_lowercase ) == len(_lowercase ) + 1 )
SCREAMING_SNAKE_CASE__ : int = outputs['''past_key_values''']
# create hypothetical next token and extent to next_input_ids
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append to next input_ids and
SCREAMING_SNAKE_CASE__ : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE__ : int = model(_lowercase )['''last_hidden_state''']
SCREAMING_SNAKE_CASE__ : List[Any] = model(_lowercase , past_key_values=_lowercase )['''last_hidden_state''']
# select random slice
SCREAMING_SNAKE_CASE__ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE__ : Dict = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
SCREAMING_SNAKE_CASE__ : str = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(_lowercase , _lowercase , atol=1E-3 )
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ : Optional[int] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE__ : int = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_torch
class lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : List[str] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
lowerCamelCase : Dict = (TrOCRForCausalLM,) if is_torch_available() else ()
lowerCamelCase : Tuple = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {}
lowerCamelCase : Any = True
lowerCamelCase : int = False
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = TrOCRStandaloneDecoderModelTester(self , is_training=_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ConfigTester(self , config_class=_lowercase )
def lowercase__ ( self : Optional[Any] ):
pass
def lowercase__ ( self : List[Any] ):
pass
def lowercase__ ( self : str ):
pass
def lowercase__ ( self : Dict ):
self.config_tester.run_common_tests()
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*_lowercase )
def lowercase__ ( self : Optional[Any] ):
return
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def lowercase__ ( self : Tuple ):
pass
| 35 |
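The past-key-values test above checks that feeding only the newest token together with the cache reproduces the full forward pass. A hedged sketch of that property using an arbitrary tiny causal LM; the checkpoint name is illustrative only.

import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").eval()
ids = torch.randint(1, 100, (1, 8))

with torch.no_grad():
    out = model(ids[:, :-1], use_cache=True)
    # One new token plus the cache must match the logits of the full sequence.
    cached = model(ids[:, -1:], past_key_values=out.past_key_values).logits[:, -1]
    full = model(ids).logits[:, -1]

assert torch.allclose(cached, full, atol=1e-3)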
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase :List[Any] = logging.get_logger(__name__)
lowerCamelCase :Union[str, Any] = {
'''google/pix2struct-textcaps-base''': (
'''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'''
),
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'pix2struct_text_model'
__SCREAMING_SNAKE_CASE : Optional[int] = ['past_key_values']
__SCREAMING_SNAKE_CASE : List[Any] = {
'hidden_size': 'hidden_size',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__(self , lowercase=50244 , lowercase=768 , lowercase=64 , lowercase=2048 , lowercase=12 , lowercase=12 , lowercase=32 , lowercase=128 , lowercase=0.1 , lowercase=1E-6 , lowercase=1.0 , lowercase="gelu_new" , lowercase=0 , lowercase=False , lowercase=0 , lowercase=1 , lowercase=False , lowercase=True , **lowercase , ):
A_ : Tuple = vocab_size
A_ : str = hidden_size
A_ : Optional[Any] = d_kv
A_ : Tuple = d_ff
A_ : str = num_layers
A_ : int = num_heads
A_ : Dict = relative_attention_num_buckets
A_ : Optional[Any] = relative_attention_max_distance
A_ : Dict = dropout_rate
A_ : Optional[int] = layer_norm_epsilon
A_ : Dict = initializer_factor
A_ : Any = use_cache
A_ : int = eos_token_id
A_ : Tuple = decoder_start_token_id
# for backwards compatibility
A_ : str = dense_act_fn
super().__init__(
pad_token_id=lowercase , eos_token_id=lowercase , decoder_start_token_id=lowercase , tie_word_embeddings=lowercase , is_decoder=lowercase , **lowercase , )
@classmethod
def _a (cls , lowercase , **lowercase ):
cls._set_token_in_kwargs(lowercase )
A_, A_ : List[str] = cls.get_config_dict(lowercase , **lowercase )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
A_ : int = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(lowercase , **lowercase )
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : int = 'pix2struct_vision_model'
def __init__(self , lowercase=768 , lowercase=768 , lowercase=2048 , lowercase=64 , lowercase=12 , lowercase=12 , lowercase="gelu_new" , lowercase=1E-6 , lowercase=0.0 , lowercase=0.0 , lowercase=1E-10 , lowercase=1.0 , lowercase=4096 , lowercase=32 , lowercase=128 , **lowercase , ):
super().__init__(**lowercase )
A_ : List[str] = hidden_size
A_ : Optional[int] = patch_embed_hidden_size
A_ : Any = d_ff
A_ : str = dropout_rate
A_ : Dict = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : List[Any] = initializer_range
A_ : List[str] = initializer_factor
A_ : Dict = attention_dropout
A_ : Optional[Any] = layer_norm_eps
A_ : Optional[Any] = dense_act_fn
A_ : List[Any] = seq_len
A_ : Tuple = relative_attention_num_buckets
A_ : Any = relative_attention_max_distance
A_ : int = d_kv
@classmethod
def _a (cls , lowercase , **lowercase ):
cls._set_token_in_kwargs(lowercase )
A_, A_ : List[Any] = cls.get_config_dict(lowercase , **lowercase )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
A_ : Tuple = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(lowercase , **lowercase )
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Any = 'pix2struct'
__SCREAMING_SNAKE_CASE : List[Any] = True
def __init__(self , lowercase=None , lowercase=None , lowercase=1.0 , lowercase=0.02 , lowercase=False , lowercase=False , lowercase=True , **lowercase , ):
super().__init__(tie_word_embeddings=lowercase , is_encoder_decoder=lowercase , **lowercase )
if text_config is None:
A_ : Optional[Any] = {}
logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""" )
if vision_config is None:
A_ : Tuple = {}
logger.info("""vision_config is None. Initializing the Pix2StructVisionConfig with default values.""" )
A_ : Tuple = PixaStructTextConfig(**lowercase )
A_ : List[str] = PixaStructVisionConfig(**lowercase )
A_ : Dict = self.text_config.decoder_start_token_id
A_ : Union[str, Any] = self.text_config.pad_token_id
A_ : str = self.text_config.eos_token_id
A_ : List[str] = initializer_factor
A_ : int = initializer_range
A_ : Tuple = self.initializer_range
A_ : Tuple = self.initializer_range
A_ : List[str] = is_vqa
@classmethod
def _a (cls , lowercase , lowercase , **lowercase ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase )
def _a (self ):
A_ : Optional[Any] = copy.deepcopy(self.__dict__ )
A_ : str = self.text_config.to_dict()
A_ : List[Any] = self.vision_config.to_dict()
A_ : List[str] = self.__class__.model_type
return output | 667 | 0 |
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
__lowercase : Tuple = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
print('''Googling.....''')
__lowercase : int = f'''https://www.google.com/search?q={query}&num=100'''
__lowercase : int = requests.get(
url,
headers={'''User-Agent''': str(UserAgent().random)},
)
try:
__lowercase : Tuple = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
__lowercase : Union[str, Any] = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
webbrowser.open(link)
| 36 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
lowerCamelCase :Union[str, Any] = {
'''configuration_audio_spectrogram_transformer''': [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ASTConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :int = [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ASTForAudioClassification''',
'''ASTModel''',
'''ASTPreTrainedModel''',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Tuple = ['''ASTFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
lowerCamelCase :Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 667 | 0 |
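The `_LazyModule` registration above defers heavy imports until first attribute access. A minimal stand-in for the same idea using PEP 562 module-level `__getattr__`; this sketch is not the transformers implementation.

import importlib

_import_structure = {"feature_extraction": ["ASTFeatureExtractor"]}

def __getattr__(name):
    # Resolve the submodule lazily on first access, then delegate.
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")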
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
UpperCamelCase : Tuple = random.Random()
if is_torch_available():
import torch
def UpperCamelCase_ ( __a , __a=1.0 , __a=None , __a=None ) -> Dict:
if rng is None:
a__ : int = global_rng
a__ : Union[str, Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class A__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Tuple=7 , lowerCamelCase__ : Tuple=400 , lowerCamelCase__ : List[str]=2_000 , lowerCamelCase__ : Union[str, Any]=1 , lowerCamelCase__ : Optional[Any]=0.0 , lowerCamelCase__ : Any=16_000 , lowerCamelCase__ : str=True , lowerCamelCase__ : List[str]=True , ):
a__ : List[Any] = parent
a__ : Dict = batch_size
a__ : List[Any] = min_seq_length
a__ : Tuple = max_seq_length
a__ : Tuple = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
a__ : Dict = feature_size
a__ : Any = padding_value
a__ : List[Any] = sampling_rate
a__ : Tuple = return_attention_mask
a__ : int = do_normalize
def _UpperCamelCase( self : Tuple ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _UpperCamelCase( self : str , lowerCamelCase__ : str=False , lowerCamelCase__ : Optional[Any]=False ):
def _flatten(lowerCamelCase__ : Dict ):
return list(itertools.chain(*lowerCamelCase__ ) )
if equal_length:
a__ : List[str] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
a__ : Tuple = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
a__ : Any = [np.asarray(lowerCamelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class A__ ( A__ , unittest.TestCase ):
"""simple docstring"""
_lowercase = ASTFeatureExtractor
def _UpperCamelCase( self : Dict ):
a__ : List[Any] = ASTFeatureExtractionTester(self )
def _UpperCamelCase( self : str ):
# Tests that all call wrap to encode_plus and batch_encode_plus
a__ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
a__ : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
a__ : str = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
# Test not batched input
a__ : Tuple = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
a__ : Union[str, Any] = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-3 ) )
# Test batched
a__ : List[str] = feat_extract(lowerCamelCase__ , padding=lowerCamelCase__ , return_tensors="np" ).input_values
a__ : str = feat_extract(lowerCamelCase__ , padding=lowerCamelCase__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
a__ : str = [floats_list((1, x) )[0] for x in (800, 800, 800)]
a__ : List[Any] = np.asarray(lowerCamelCase__ )
a__ : int = feat_extract(lowerCamelCase__ , return_tensors="np" ).input_values
a__ : str = feat_extract(lowerCamelCase__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-3 ) )
@require_torch
def _UpperCamelCase( self : str ):
import torch
a__ : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a__ : int = np.random.rand(100 ).astype(np.float64 )
a__ : List[str] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
a__ : Union[str, Any] = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.float32 )
a__ : str = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
def _UpperCamelCase( self : Dict , lowerCamelCase__ : List[str] ):
from datasets import load_dataset
a__ : Optional[Any] = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
a__ : Union[str, Any] = ds.sort("id" ).select(range(lowerCamelCase__ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
@require_torch
def _UpperCamelCase( self : str ):
# fmt: off
a__ : List[Any] = torch.tensor(
[-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
-1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
-1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
-0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] )
# fmt: on
a__ : Union[str, Any] = self._load_datasamples(1 )
a__ : Dict = ASTFeatureExtractor()
a__ : List[str] = feature_extractor(lowerCamelCase__ , return_tensors="pt" ).input_values
self.assertEqual(input_values.shape , (1, 1_024, 128) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , lowerCamelCase__ , atol=1E-4 ) )
| 37 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : List[str] = ['image_processor', 'tokenizer']
__SCREAMING_SNAKE_CASE : Any = 'LayoutLMv3ImageProcessor'
__SCREAMING_SNAKE_CASE : Any = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__(self , lowercase=None , lowercase=None , **lowercase ):
A_ : Dict = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , lowercase , )
A_ : List[str] = kwargs.pop("""feature_extractor""" )
A_ : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(lowercase , lowercase )
def __call__(self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = True , lowercase = False , lowercase = None , lowercase = None , lowercase = 0 , lowercase = None , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = True , lowercase = None , **lowercase , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"""You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.""" )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"""You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" )
# first, apply the image processor
A_ : Optional[int] = self.image_processor(images=lowercase , return_tensors=lowercase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(lowercase , lowercase ):
A_ : Union[str, Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
A_ : Dict = features["""words"""]
A_ : Optional[int] = self.tokenizer(
text=text if text is not None else features["""words"""] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["""boxes"""] , word_labels=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_token_type_ids=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , )
# add pixel values
A_ : List[Any] = features.pop("""pixel_values""" )
if return_overflowing_tokens is True:
A_ : List[str] = self.get_overflowing_images(lowercase , encoded_inputs["""overflow_to_sample_mapping"""] )
A_ : Optional[int] = images
return encoded_inputs
def _a (self , lowercase , lowercase ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
A_ : str = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(lowercase ) != len(lowercase ):
raise ValueError(
"""Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"""
F' {len(lowercase )} and {len(lowercase )}' )
return images_with_overflow
def _a (self , *lowercase , **lowercase ):
return self.tokenizer.batch_decode(*lowercase , **lowercase )
def _a (self , *lowercase , **lowercase ):
return self.tokenizer.decode(*lowercase , **lowercase )
@property
def _a (self ):
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def _a (self ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowercase , )
return self.image_processor_class
@property
def _a (self ):
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , lowercase , )
return self.image_processor | 667 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
A_ : Union[str, Any] = logging.get_logger(__name__)
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
warnings.warn(
"""The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use DonutImageProcessor instead.""" , __SCREAMING_SNAKE_CASE , )
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
| 38 |
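The class above is a thin deprecation shim. The same pattern in isolation, with hypothetical class names: subclass the replacement and emit a FutureWarning on construction.

import warnings

class NewImageProcessor:
    def __init__(self, size=224):
        self.size = size

class OldFeatureExtractor(NewImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)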
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
lowerCamelCase :Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(__UpperCAmelCase )
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , **lowercase ):
super().__init__(**lowercase )
requires_backends(self , """vision""" )
requires_backends(self , """torch""" )
if self.framework != "pt":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
self.check_model_type(lowercase )
def _a (self , **lowercase ):
A_ : str = {}
A_ : Dict = {}
A_ : str = {}
# preprocess args
if "points_per_batch" in kwargs:
A_ : Dict = kwargs["""points_per_batch"""]
if "points_per_crop" in kwargs:
A_ : int = kwargs["""points_per_crop"""]
if "crops_n_layers" in kwargs:
A_ : str = kwargs["""crops_n_layers"""]
if "crop_overlap_ratio" in kwargs:
A_ : int = kwargs["""crop_overlap_ratio"""]
if "crop_n_points_downscale_factor" in kwargs:
A_ : Tuple = kwargs["""crop_n_points_downscale_factor"""]
# postprocess args
if "pred_iou_thresh" in kwargs:
A_ : Any = kwargs["""pred_iou_thresh"""]
if "stability_score_offset" in kwargs:
A_ : Optional[int] = kwargs["""stability_score_offset"""]
if "mask_threshold" in kwargs:
A_ : Union[str, Any] = kwargs["""mask_threshold"""]
if "stability_score_thresh" in kwargs:
A_ : List[str] = kwargs["""stability_score_thresh"""]
if "crops_nms_thresh" in kwargs:
A_ : Union[str, Any] = kwargs["""crops_nms_thresh"""]
if "output_rle_mask" in kwargs:
A_ : List[Any] = kwargs["""output_rle_mask"""]
if "output_bboxes_mask" in kwargs:
A_ : Union[str, Any] = kwargs["""output_bboxes_mask"""]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__(self , lowercase , *lowercase , lowercase=None , lowercase=None , **lowercase ):
return super().__call__(lowercase , *lowercase , num_workers=lowercase , batch_size=lowercase , **lowercase )
def _a (self , lowercase , lowercase=64 , lowercase = 0 , lowercase = 512 / 1500 , lowercase = 32 , lowercase = 1 , ):
A_ : Tuple = load_image(lowercase )
A_ : int = self.image_processor.size["""longest_edge"""]
A_, A_, A_, A_ : str = self.image_processor.generate_crop_boxes(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
A_ : Dict = self.image_processor(images=lowercase , return_tensors="""pt""" )
with self.device_placement():
if self.framework == "pt":
A_ : Optional[Any] = self.get_inference_context()
with inference_context():
A_ : str = self._ensure_tensor_on_device(lowercase , device=self.device )
A_ : Tuple = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
A_ : Tuple = image_embeddings
A_ : Dict = grid_points.shape[1]
A_ : Optional[Any] = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"""Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
"""To return all points at once, set points_per_batch to None""" )
for i in range(0 , lowercase , lowercase ):
A_ : Tuple = grid_points[:, i : i + points_per_batch, :, :]
A_ : List[Any] = input_labels[:, i : i + points_per_batch]
A_ : Optional[Any] = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def _a (self , lowercase , lowercase=0.88 , lowercase=0.95 , lowercase=0 , lowercase=1 , ):
A_ : Any = model_inputs.pop("""input_boxes""" )
A_ : str = model_inputs.pop("""is_last""" )
A_ : int = model_inputs.pop("""original_sizes""" ).tolist()
A_ : int = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
A_ : List[str] = self.model(**lowercase )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
A_ : Optional[int] = model_outputs["""pred_masks"""]
A_ : Tuple = self.image_processor.post_process_masks(
lowercase , lowercase , lowercase , lowercase , binarize=lowercase )
A_ : Union[str, Any] = model_outputs["""iou_scores"""]
A_, A_, A_ : Tuple = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , lowercase , lowercase , lowercase , lowercase , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def _a (self , lowercase , lowercase=False , lowercase=False , lowercase=0.7 , ):
A_ : Tuple = []
A_ : Optional[Any] = []
A_ : str = []
for model_output in model_outputs:
all_scores.append(model_output.pop("""iou_scores""" ) )
all_masks.extend(model_output.pop("""masks""" ) )
all_boxes.append(model_output.pop("""boxes""" ) )
A_ : Any = torch.cat(lowercase )
A_ : List[Any] = torch.cat(lowercase )
A_, A_, A_, A_ : Optional[int] = self.image_processor.post_process_for_mask_generation(
lowercase , lowercase , lowercase , lowercase )
A_ : int = defaultdict(lowercase )
for output in model_outputs:
for k, v in output.items():
extra[k].append(lowercase )
A_ : Optional[int] = {}
if output_rle_mask:
A_ : List[str] = rle_mask
if output_bboxes_mask:
A_ : Optional[int] = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra} | 667 | 0 |
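The preprocess step above slices the prompt-point grid into fixed-size chunks and flags the final one so postprocessing knows when to aggregate. A minimal sketch of that chunking pattern with illustrative shapes.

import torch

def chunk_points(grid_points: torch.Tensor, points_per_batch: int):
    n_points = grid_points.shape[1]
    for i in range(0, n_points, points_per_batch):
        batch = grid_points[:, i : i + points_per_batch]
        yield batch, i + points_per_batch >= n_points  # (chunk, is_last)

for batch, is_last in chunk_points(torch.rand(1, 100, 1, 2), 64):
    print(batch.shape, is_last)  # (1, 64, 1, 2) False, then (1, 36, 1, 2) True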
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
# Initialise PyTorch model
snake_case_ = RemBertConfig.from_json_file(SCREAMING_SNAKE_CASE__ )
print('''Building PyTorch model from configuration: {}'''.format(str(SCREAMING_SNAKE_CASE__ ) ) )
snake_case_ = RemBertModel(SCREAMING_SNAKE_CASE__ )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Save pytorch-model
print('''Save PyTorch model to {}'''.format(SCREAMING_SNAKE_CASE__ ) )
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--rembert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained RemBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase_ = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path) | 39 |
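An example invocation of the conversion script above; every path is a placeholder.

# python convert_rembert_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path /path/to/model.ckpt \
#     --rembert_config_file /path/to/config.json \
#     --pytorch_dump_path /path/to/pytorch_model.bin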
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Union[str, Any] = int(np.ceil((x_end - xa) / step_size ) )
A_ : int = np.zeros((n + 1,) )
A_ : List[str] = ya
A_ : Any = xa
for k in range(lowerCamelCase__ ):
A_ : List[Any] = y[k] + step_size * ode_func(lowerCamelCase__ , y[k] )
A_ : Optional[int] = y[k] + (
(step_size / 2) * (ode_func(lowerCamelCase__ , y[k] ) + ode_func(x + step_size , lowerCamelCase__ ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod() | 667 | 0 |
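Example use of the integrator above (Heun's method, the explicit trapezoidal rule), assuming the argument order (ode_func, y0, x0, step_size, x_end) implied by the body: solve y' = y with y(0) = 1, whose exact solution at x = 1 is e.

import numpy as np

def f(x, y):
    return y

ys = a(f, 1.0, 0.0, 0.01, 1.0)
print(ys[-1])  # ~ 2.71828; Heun's method is second order, so the error is O(h^2)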
from itertools import product
from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros
def UpperCamelCase ( snake_case__ : Optional[int] , snake_case__ : Optional[Any] ) -> Dict:
UpperCamelCase : Optional[Any] = k_size // 2
UpperCamelCase , UpperCamelCase : List[str] = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
UpperCamelCase : str = 1 / (2 * pi * sigma) * exp(-(square(snake_case__ ) + square(snake_case__ )) / (2 * square(snake_case__ )) )
return g
def UpperCamelCase ( snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : Optional[int] ) -> Optional[Any]:
UpperCamelCase , UpperCamelCase : List[str] = image.shape[0], image.shape[1]
# dst image height and width
UpperCamelCase : Optional[Any] = height - k_size + 1
UpperCamelCase : Dict = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
UpperCamelCase : List[str] = zeros((dst_height * dst_width, k_size * k_size) )
UpperCamelCase : Dict = 0
for i, j in product(range(snake_case__ ) , range(snake_case__ ) ):
UpperCamelCase : Tuple = ravel(image[i : i + k_size, j : j + k_size] )
UpperCamelCase : List[str] = window
row += 1
# turn the kernel into shape(k*k, 1)
UpperCamelCase : Optional[int] = gen_gaussian_kernel(snake_case__ , snake_case__ )
UpperCamelCase : Optional[int] = ravel(snake_case__ )
# reshape and get the dst image
UpperCamelCase : Dict = dot(snake_case__ , snake_case__ ).reshape(snake_case__ , snake_case__ ).astype(snake_case__ )
return dst
if __name__ == "__main__":
# read original image
__UpperCAmelCase = imread(r'''../image_data/lena.jpg''')
# turn image in gray scale value
__UpperCAmelCase = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
__UpperCAmelCase = gaussian_filter(gray, 3, sigma=1)
__UpperCAmelCase = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow('''gaussian filter with 3x3 mask''', gaussianaxa)
imshow('''gaussian filter with 5x5 mask''', gaussianaxa)
waitKey()
| 40 |
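One caveat worth noting: the textbook 2-D Gaussian prefactor is 1 / (2 * pi * sigma ** 2), while the kernel above uses 1 / (2 * pi * sigma) and is never normalised, so filtered intensities end up scaled rather than averaged. A small sketch of a normalised kernel that preserves image brightness:

from numpy import exp, mgrid, square

def normalised_gaussian_kernel(k_size: int, sigma: float):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g / g.sum()  # the constant prefactor cancels; weights sum to 1

print(normalised_gaussian_kernel(3, 1.0).sum())  # -> 1.0 (up to float rounding)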
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase :Union[str, Any] = logging.get_logger(__name__)
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[int] = MobileNetVaConfig(layer_norm_eps=0.001 )
if "_quant" in model_name:
raise ValueError("""Quantized models are not supported.""" )
A_ : Any = re.match(r"""^mobilenet_v1_([^_]*)_([^_]*)$""" , lowerCamelCase__ )
if matches:
A_ : Optional[Any] = float(matches[1] )
A_ : Union[str, Any] = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
A_ : Optional[Any] = 10_01
A_ : Union[str, Any] = """imagenet-1k-id2label.json"""
A_ : List[str] = """huggingface/label-files"""
A_ : str = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="""dataset""" ) , """r""" ) )
A_ : Optional[int] = {int(lowerCamelCase__ ) + 1: v for k, v in idalabel.items()}
A_ : int = """background"""
A_ : List[str] = idalabel
A_ : List[str] = {v: k for k, v in idalabel.items()}
return config
def a ( ):
'''simple docstring'''
A_ : List[str] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A_ : Optional[int] = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
return im
@torch.no_grad()
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ):
'''simple docstring'''
A_ : Optional[Any] = get_mobilenet_va_config(lowerCamelCase__ )
# Load 🤗 model
A_ : Dict = MobileNetVaForImageClassification(lowerCamelCase__ ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
A_ : Any = MobileNetVaImageProcessor(
crop_size={"""width""": config.image_size, """height""": config.image_size} , size={"""shortest_edge""": config.image_size + 32} , )
A_ : int = image_processor(images=prepare_img() , return_tensors="""pt""" )
A_ : List[str] = model(**lowerCamelCase__ )
A_ : Any = outputs.logits
assert logits.shape == (1, 10_01)
if model_name == "mobilenet_v1_1.0_224":
A_ : str = torch.tensor([-4.1_739, -1.1_233, 3.1_205] )
elif model_name == "mobilenet_v1_0.75_192":
A_ : int = torch.tensor([-3.9_440, -2.3_141, -0.3_333] )
else:
A_ : Any = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 )
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCamelCase__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
print("""Pushing to the hub...""" )
A_ : Union[str, Any] = """google/""" + model_name
image_processor.push_to_hub(lowerCamelCase__ )
model.push_to_hub(lowerCamelCase__ )
if __name__ == "__main__":
lowerCamelCase :Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''mobilenet_v1_1.0_224''',
type=str,
help='''Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.''',
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCamelCase :str = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
) | 667 | 0 |
'''Count the inversions in a sequence, by brute force and by divide and conquer.'''
def count_inversions_bf(arr):
    """Count inversions with a pairwise O(n^2) scan."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """Count inversions in O(n log n) by sorting halves and merging."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    """Merge the sorted halves P and Q, counting inversions that span them."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print('''number of inversions = ''', num_inversions_bf)
    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print('''number of inversions = ''', num_inversions_bf)
    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print('''number of inversions = ''', num_inversions_bf)
if __name__ == "__main__":
main()
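# count_inversions_recursive() reaches O(n log n) by counting cross inversions
# during the merge step, versus O(n^2) for the brute-force pairwise check above.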
| 41 |
'''Processor for a Bark-style text-to-audio model: a tokenizer plus optional speaker-embedding voice presets.'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class BarkProcessor(ProcessorMixin):
    tokenizer_class = 'AutoTokenizer'
    attributes = ['tokenizer']

    preset_shape = {
        'semantic_prompt': 1,
        'coarse_prompt': 2,
        'fine_prompt': 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings
    @classmethod
    def from_pretrained(cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path, speaker_embeddings_dict_path, subfolder=kwargs.pop("subfolder", None), cache_dir=kwargs.pop("cache_dir", None), force_download=kwargs.pop("force_download", False), proxies=kwargs.pop("proxies", None), resume_download=kwargs.pop("resume_download", False), local_files_only=kwargs.pop("local_files_only", False), use_auth_token=kwargs.pop("use_auth_token", None), revision=kwargs.pop("revision", None), )
            if speaker_embeddings_path is None:
                logger.warning(
                    F'`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exist\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None
        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)
        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)
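    # A minimal usage sketch (repo id and preset name are illustrative):
    #   processor = BarkProcessor.from_pretrained("suno/bark-small")
    #   inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")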
    def save_pretrained(self, save_directory, speaker_embeddings_dict_path="speaker_embeddings_path.json", speaker_embeddings_directory="speaker_embeddings", push_to_hub=False, **kwargs, ):
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, """v2"""), exist_ok=True)
            embeddings_dict = {}
            embeddings_dict["repo_or_path"] = save_directory
            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key)
                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["""repo_or_path"""], speaker_embeddings_directory, F'{prompt_key}_{key}'), voice_preset[key], allow_pickle=False, )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, F'{prompt_key}_{key}.npy')
                    embeddings_dict[prompt_key] = tmp_dict
            with open(os.path.join(save_directory, speaker_embeddings_dict_path), """w""") as fp:
                json.dump(embeddings_dict, fp)
        super().save_pretrained(save_directory, push_to_hub, **kwargs)
    def _load_voice_preset(self, voice_preset=None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]
        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    F'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' )
            path = get_file_from_repo(
                self.speaker_embeddings.get("""repo_or_path""", """/"""), voice_preset_paths[key], subfolder=kwargs.pop("""subfolder""", None), cache_dir=kwargs.pop("""cache_dir""", None), force_download=kwargs.pop("""force_download""", False), proxies=kwargs.pop("""proxies""", None), resume_download=kwargs.pop("""resume_download""", False), local_files_only=kwargs.pop("""local_files_only""", False), use_auth_token=kwargs.pop("""use_auth_token""", None), revision=kwargs.pop("""revision""", None), )
            if path is None:
                raise ValueError(
                    F'`{os.path.join(self.speaker_embeddings.get("repo_or_path", "/"), voice_preset_paths[key])}` does not exist\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.' )
            voice_preset_dict[key] = np.load(path)
        return voice_preset_dict
    def _validate_voice_preset_dict(self, voice_preset=None):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(F'Voice preset unrecognized, missing {key} as a key.' )
            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
    def __call__(self, text=None, voice_preset=None, return_tensors="pt", max_length=256, add_special_tokens=False, return_attention_mask=True, return_token_type_ids=False, **kwargs, ):
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)
            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(""".npz"""):
                    voice_preset = voice_preset + """.npz"""
                voice_preset = np.load(voice_preset)
        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)
        encoded_text = self.tokenizer(
            text, return_tensors=return_tensors, padding="""max_length""", max_length=max_length, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, add_special_tokens=add_special_tokens, **kwargs, )
        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset
        return encoded_text | 667 | 0 |
'''Validate a Spanish national ID (DNI): 8 digits plus a mod-23 checksum letter.'''
ERROR_MSG = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    """Return True when the trailing letter matches the mod-23 checksum."""
    if not isinstance(spanish_id, str):
        msg = f'''Expected string as input, found {type(spanish_id).__name__}'''
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace('-', '').upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(ERROR_MSG)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(ERROR_MSG) from ex

    if letter.isdigit():
        raise ValueError(ERROR_MSG)

    return letter == LOOKUP_LETTERS[number % 23]
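# Worked example: for "12345678Z", 12345678 % 23 == 14 and LOOKUP_LETTERS[14]
# is "Z", so is_spain_national_id("12345678Z") returns True.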
if __name__ == "__main__":
import doctest
doctest.testmod()
| 42 |
'''Tests for the InstructBLIP processor (image processor + tokenizer + Q-Former tokenizer).'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
        GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""")

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of PIL images for testing."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self):
A_ : str = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
A_ : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A_ : Optional[Any] = self.get_image_processor(do_normalize=lowercase , padding_value=1.0 )
A_ : str = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase )
self.assertIsInstance(processor.qformer_tokenizer , lowercase )
    def test_image_processor(self):
A_ : Any = self.get_image_processor()
A_ : Union[str, Any] = self.get_tokenizer()
A_ : List[str] = self.get_qformer_tokenizer()
A_ : int = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : List[Any] = self.prepare_image_inputs()
A_ : Union[str, Any] = image_processor(lowercase , return_tensors="""np""" )
A_ : Dict = processor(images=lowercase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def test_tokenizer(self):
A_ : List[Any] = self.get_image_processor()
A_ : Optional[Any] = self.get_tokenizer()
A_ : Any = self.get_qformer_tokenizer()
A_ : List[str] = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : str = """lower newer"""
A_ : List[Any] = processor(text=lowercase )
A_ : Optional[int] = tokenizer(lowercase , return_token_type_ids=lowercase )
A_ : List[Any] = qformer_tokenizer(lowercase , return_token_type_ids=lowercase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] )
    def test_processor(self):
A_ : int = self.get_image_processor()
A_ : Union[str, Any] = self.get_tokenizer()
A_ : Union[str, Any] = self.get_qformer_tokenizer()
A_ : Any = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : Optional[int] = """lower newer"""
A_ : Optional[int] = self.prepare_image_inputs()
A_ : Tuple = processor(text=lowercase , images=lowercase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
# test if it raises when no input is passed
with pytest.raises(lowercase ):
processor()
    def test_tokenizer_decode(self):
A_ : Dict = self.get_image_processor()
A_ : str = self.get_tokenizer()
A_ : Optional[int] = self.get_qformer_tokenizer()
A_ : int = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ : Optional[int] = processor.batch_decode(lowercase )
A_ : Dict = tokenizer.batch_decode(lowercase )
self.assertListEqual(lowercase , lowercase )
    def test_model_input_names(self):
A_ : Any = self.get_image_processor()
A_ : Dict = self.get_tokenizer()
A_ : Union[str, Any] = self.get_qformer_tokenizer()
A_ : Optional[int] = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : List[Any] = """lower newer"""
A_ : Optional[Any] = self.prepare_image_inputs()
A_ : Any = processor(text=lowercase , images=lowercase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , ) | 667 | 0 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ['XLA_PYTHON_CLIENT_ALLOCATOR'] = 'platform'
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    """Build the dict of model inputs, deriving masks from padding where absent."""
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id , 1 , 0 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
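# NOTE: the returned dict reuses the encoder `attention_mask` for the
# "decoder_attention_mask" entry, mirroring the upstream test helper; the
# locally derived decoder mask above is left unused.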
class FlaxBlenderbotModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=16 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=32 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , initializer_range=0.02 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.int64 )) , -1 )
        decoder_input_ids = shift_tokens_right(input_ids , 1 , 2 )
        config = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=False , )
        inputs_dict = prepare_blenderbot_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self , model_class_name , config , inputs_dict ):
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict['''input_ids'''] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=outputs_cache.past_key_values , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}' )
    def check_use_cache_forward_with_attn_mask(self , model_class_name , config , inputs_dict ):
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict['''input_ids'''] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask_cache , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=decoder_attention_mask_cache , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs , decoder_attention_mask=decoder_attention_mask )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}' )
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99
def lowerCamelCase_ ( self: str ) -> List[Any]:
"""simple docstring"""
lowercase__ = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
lowercase__ = input_ids.shape[0]
lowercase__ = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def lowerCamelCase_ ( self: Dict ) -> str:
"""simple docstring"""
lowercase__ , lowercase__ , lowercase__ = self._get_config_and_data()
lowercase__ = FlaxBlenderbotForConditionalGeneration(UpperCamelCase_ )
lowercase__ = lm_model(input_ids=UpperCamelCase_ )
lowercase__ = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , UpperCamelCase_ )
def lowerCamelCase_ ( self: Any ) -> int:
"""simple docstring"""
lowercase__ = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
lowercase__ = FlaxBlenderbotForConditionalGeneration(UpperCamelCase_ )
lowercase__ = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
lowercase__ = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
lowercase__ = lm_model(input_ids=UpperCamelCase_ , decoder_input_ids=UpperCamelCase_ )
lowercase__ = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , UpperCamelCase_ )
def lowerCamelCase_ ( self: Any ) -> Optional[int]:
"""simple docstring"""
lowercase__ = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
lowercase__ = shift_tokens_right(UpperCamelCase_ , 1 , 2 )
lowercase__ = np.equal(UpperCamelCase_ , 1 ).astype(np.floataa ).sum()
lowercase__ = np.equal(UpperCamelCase_ , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(UpperCamelCase_ , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self )
def lowerCamelCase_ ( self: List[str] ) -> Tuple:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: List[Any] ) -> List[str]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = model_class(UpperCamelCase_ )
@jax.jit
def encode_jitted(UpperCamelCase_: Dict , UpperCamelCase_: int=None , **UpperCamelCase_: Dict ):
return model.encode(input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ )
with self.subTest('''JIT Enabled''' ):
lowercase__ = encode_jitted(**UpperCamelCase_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowercase__ = encode_jitted(**UpperCamelCase_ ).to_tuple()
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCamelCase_ ( self: Any ) -> Dict:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
lowercase__ = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[Any] ):
return model.decode(
decoder_input_ids=UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , encoder_outputs=UpperCamelCase_ , )
with self.subTest('''JIT Enabled''' ):
lowercase__ = decode_jitted(**UpperCamelCase_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowercase__ = decode_jitted(**UpperCamelCase_ ).to_tuple()
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowerCamelCase_ ( self: List[Any] ) -> Any:
"""simple docstring"""
for model_class_name in self.all_model_classes:
lowercase__ = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
lowercase__ = np.ones((1, 1) ) * model.config.eos_token_id
lowercase__ = model(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
@unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
@slow
def lowerCamelCase_ ( self: List[Any] ) -> List[str]:
"""simple docstring"""
lowercase__ = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, '''max_length''': 25}
lowercase__ = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}
lowercase__ = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=UpperCamelCase_ )
lowercase__ = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' )
lowercase__ = ['''Sam''']
lowercase__ = tokenizer(UpperCamelCase_ , return_tensors='''jax''' )
lowercase__ = model.generate(**UpperCamelCase_ , **UpperCamelCase_ )
lowercase__ = '''Sam is a great name. It means "sun" in Gaelic.'''
lowercase__ = tokenizer.batch_decode(UpperCamelCase_ , **UpperCamelCase_ )
assert generated_txt[0].strip() == tgt_text
| 43 |
''' MGP-STR model configuration'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
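# Instantiating MgpstrConfig with no arguments reproduces a configuration
# similar to the "alibaba-damo/mgp-str-base" checkpoint (defaults below follow
# the upstream values).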
class MgpstrConfig(PretrainedConfig):
    model_type = 'mgp-str'

    def __init__(self , image_size=[32, 128] , patch_size=4 , num_channels=3 , max_token_length=27 , num_character_labels=38 , num_bpe_labels=50257 , num_wordpiece_labels=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , mlp_ratio=4.0 , qkv_bias=True , distilled=False , layer_norm_eps=1E-5 , drop_rate=0.0 , attn_drop_rate=0.0 , drop_path_rate=0.0 , output_a3_attentions=False , initializer_range=0.02 , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range | 667 | 0 |
'''One-shot decompression of files compressed with a Lempel-Ziv-Welch-style scheme.'''
import math
import sys


def read_file_binary(file_path: str) -> str:
    """Read the given file and return its contents as a string of bits."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = F'{dat:08b}'
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """Decompress the bit string by rebuilding the LZW lexicon on the fly."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"
        if math.log2(index).is_integer():
            # the lexicon keys grow by one bit; re-key every existing entry
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex
        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    """Group the bit string into bytes (with padding) and write them to disk."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """Strip the length header that precedes the compressed payload."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    """Read the source file, decompress its payload and write it to destination."""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
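# The byte stream produced by the matching compressor starts with a length
# header; compress() strips it via remove_prefix() before decoding, so despite
# its name this entry point performs decompression.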
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2]) | 44 |
'''Find a root of a function with the secant method (here called intersection).'''
import math
from collections.abc import Callable


def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Iterate secant steps from two starting points until |x_n2 - x_n1| < 1e-5."""
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("""float division by zero, could not find root""")
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5
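# f(x) = x^3 - 2x - 5 has a single real root near x = 2.0946, so the call below
# should print a value close to that (tolerance 1e-5 inside intersection()).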
if __name__ == "__main__":
print(intersection(f, 3, 3.5)) | 667 | 0 |
from __future__ import annotations


class XORCipher:
    """XOR cipher; the same key is used both to encrypt and to decrypt."""

    def __init__(self, key: int = 0):
        # private default key, used when a call passes key=0
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, list)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = """"""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = """"""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("""encrypt.out""", """w+""") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("""decrypt.out""", """w+""") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
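# Because XOR is an involution -- chr(ord(c) ^ key ^ key) == c for any key in
# 0..255 -- encryption and decryption share the same transform, which is why
# the paired methods above have identical bodies.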
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful") | 45 |
'''Image processor class for CLIP.'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ['pixel_values']

    def __init__(self , do_resize = True , size = None , resample = PILImageResampling.BICUBIC , do_center_crop = True , crop_size = None , do_rescale = True , rescale_factor = 1 / 255 , do_normalize = True , image_mean = None , image_std = None , do_convert_rgb = True , **kwargs , ):
        super().__init__(**kwargs )
        size = size if size is not None else {"""shortest_edge""": 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
        crop_size = get_size_dict(crop_size , default_to_square=True , param_name="""crop_size""" )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self , image , size , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs , ):
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
        output_size = get_resize_output_image_size(image , size=size["""shortest_edge"""] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop(self , image , size , data_format = None , **kwargs , ):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
        return center_crop(image , size=(size["""height"""], size["""width"""]) , data_format=data_format , **kwargs )
    def rescale(self , image , scale , data_format = None , **kwargs , ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize(self , image , mean , std , data_format = None , **kwargs , ):
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
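    # preprocess() below applies these helpers in a fixed order:
    # convert_to_rgb -> resize -> center_crop -> rescale -> normalize,
    # then moves every image to channels-first before batching.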
    def preprocess(self , images , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , do_convert_rgb = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , param_name="""size""" , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="""crop_size""" , default_to_square=True )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""" )
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data , tensor_type=return_tensors ) | 667 | 0 |
"""simple docstring"""
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
return abs(_lowerCamelCase ) if a == 0 else greatest_common_divisor(b % a , _lowerCamelCase )
def gcd_by_iterative(x: int, y: int) -> int:
    """Euclidean algorithm with a loop instead of recursion."""
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)
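# Worked example: gcd(24, 40) == 8, since 24 = 2^3 * 3 and 40 = 2^3 * 5
# share the factor 2^3.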
def main() -> None:
    """Parse two integers from user input and print both GCD results."""
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            F"""greatest_common_divisor({num_1}, {num_2}) = """
            F"""{greatest_common_divisor(num_1, num_2)}""" )
        print(F"""By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}""" )
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input" )
if __name__ == "__main__":
main() | 46 |
'''A greedy knapsack: pick the highest-ranked items that fit under max_cost.'''
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return F'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight
def build_menu(name, value, weight):
    '''Pack parallel name/value/weight lists into a list of Things.'''
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu
def greedy(item, max_cost, key_func):
    '''Take items in descending key_func order while total weight fits max_cost.'''
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
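# A minimal sketch of the pieces together (values are illustrative):
#   foods = build_menu(["burger", "pizza", "cola"], [80, 100, 60], [40, 10, 10])
#   chosen, total_value = greedy(foods, 60.0, Things.get_value)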
def test_greedy():
    '''Exercises build_menu() and greedy(); run via the doctest guard below.'''
if __name__ == "__main__":
import doctest
doctest.testmod() | 667 | 0 |
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """Capitalize the first letter of a sentence or word."""
    if not sentence:
        return ""
    # Map each lowercase ASCII letter to its uppercase counterpart.
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
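# Example: capitalize("hello world") -> "Hello world"; a leading non-letter
# (e.g. "123 hello") is returned unchanged.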
if __name__ == "__main__":
from doctest import testmod
testmod()
| 47 |
'''Helper utilities for quantization-aware training with pytorch-quantization.'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)

name_width = 5_0  # max width of layer names
qname_width = 7_0  # max width of quantizer names
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = parser.add_argument_group("""quant_trainer arguments""" )
group.add_argument("""--wprec""" , type=lowerCamelCase__ , default=8 , help="""weight precision""" )
group.add_argument("""--aprec""" , type=lowerCamelCase__ , default=8 , help="""activation precision""" )
group.add_argument("""--quant-per-tensor""" , action="""store_true""" , help="""per tensor weight scaling""" )
group.add_argument("""--quant-disable""" , action="""store_true""" , help="""disable all quantizers""" )
group.add_argument("""--quant-disable-embeddings""" , action="""store_true""" , help="""disable all embeddings quantizers""" )
group.add_argument("""--quant-disable-keyword""" , type=lowerCamelCase__ , nargs="""+""" , help="""disable quantizers by keyword""" )
group.add_argument("""--quant-disable-layer-module""" , type=lowerCamelCase__ , help="""disable quantizers by keyword under layer.""" )
group.add_argument("""--quant-enable-layer-module""" , type=lowerCamelCase__ , help="""enable quantizers by keyword under layer""" )
group.add_argument("""--calibrator""" , default="""max""" , help="""which quantization range calibrator to use""" )
group.add_argument("""--percentile""" , default=lowerCamelCase__ , type=lowerCamelCase__ , help="""percentile for PercentileCalibrator""" )
group.add_argument("""--fuse-qkv""" , action="""store_true""" , help="""use the same scale factor for qkv""" )
group.add_argument("""--clip-gelu""" , metavar="""N""" , type=lowerCamelCase__ , help="""clip gelu output maximum value to N""" )
group.add_argument(
"""--recalibrate-weights""" , action="""store_true""" , help=(
"""recalibrate weight amaxes by taking the max of the weights."""
""" amaxes will be computed with the current quantization granularity (axis)."""
) , )
def a ( lowerCamelCase__ ):
'''simple docstring'''
if args.calibrator == "max":
A_ : Union[str, Any] = """max"""
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError("""Specify --percentile when using percentile calibrator""" )
A_ : int = """histogram"""
elif args.calibrator == "mse":
A_ : Dict = """histogram"""
else:
raise ValueError(f'Invalid calibrator {args.calibrator}' )
A_ : int = QuantDescriptor(num_bits=args.aprec , calib_method=lowerCamelCase__ )
A_ : Optional[Any] = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(lowerCamelCase__ )
quant_nn.QuantLinear.set_default_quant_desc_weight(lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False , lowerCamelCase__=False ):
'''simple docstring'''
logger.info("""Configuring Model for Quantization""" )
logger.info(f'using quantization package {pytorch_quantization.__file__}' )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(lowerCamelCase__ , ["""embeddings"""] , which="""weight""" , _disabled=lowerCamelCase__ )
if args.quant_disable:
set_quantizer_by_name(lowerCamelCase__ , [""""""] , _disabled=lowerCamelCase__ )
if args.quant_disable_keyword:
set_quantizer_by_name(lowerCamelCase__ , args.quant_disable_keyword , _disabled=lowerCamelCase__ )
if args.quant_disable_layer_module:
set_quantizer_by_name(lowerCamelCase__ , [r"""layer.\d+.""" + args.quant_disable_layer_module] , _disabled=lowerCamelCase__ )
if args.quant_enable_layer_module:
set_quantizer_by_name(lowerCamelCase__ , [r"""layer.\d+.""" + args.quant_enable_layer_module] , _disabled=lowerCamelCase__ )
if args.recalibrate_weights:
recalibrate_weights(lowerCamelCase__ )
if args.fuse_qkv:
fuse_qkv(lowerCamelCase__ , lowerCamelCase__ )
if args.clip_gelu:
clip_gelu(lowerCamelCase__ , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(lowerCamelCase__ )
def a ( lowerCamelCase__ ):
'''simple docstring'''
logger.info("""Enabling Calibration""" )
for name, module in model.named_modules():
if name.endswith("""_quantizer""" ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(f'{name:80}: {module}' )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
logger.info("""Loading calibrated amax""" )
for name, module in model.named_modules():
if name.endswith("""_quantizer""" ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax("""percentile""" , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
def fusea(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
for mod in [qq, qk, qv]:
if not hasattr(lowerCamelCase__ , """_amax""" ):
print(""" WARNING: NO AMAX BUFFER""" )
return
A_ : List[Any] = qq._amax.detach().item()
A_ : Optional[int] = qk._amax.detach().item()
A_ : Dict = qv._amax.detach().item()
A_ : Any = max(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
qq._amax.fill_(lowerCamelCase__ )
qk._amax.fill_(lowerCamelCase__ )
qv._amax.fill_(lowerCamelCase__ )
logger.info(f' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}' )
for name, mod in model.named_modules():
if name.endswith(""".attention.self""" ):
logger.info(f'FUSE_QKV: {name:{name_width}}' )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if name.endswith(""".output.dense""" ) and not name.endswith("""attention.output.dense""" ):
A_ : Optional[int] = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=lowerCamelCase__ )
A_ : Dict = mod._input_quantizer._amax.data.detach().item()
logger.info(f'CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}' )
def a ( lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(lowerCamelCase__ , """_weight_quantizer""" ) and mod._weight_quantizer.axis is not None:
A_ : Tuple = mod.weight.shape[0]
A_ : Dict = mod._weight_quantizer._amax.detach()
A_ : List[Any] = torch.ones(lowerCamelCase__ , dtype=amax.dtype , device=amax.device ) * amax
print(f'expanding {name} {amax} -> {mod._weight_quantizer._amax}' )
def a ( lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(lowerCamelCase__ , """_weight_quantizer""" ):
if not hasattr(mod.weight_quantizer , """_amax""" ):
print("""RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER""" )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
A_ : Dict = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
A_ : Tuple = set(range(len(mod.weight.size() ) ) ) - axis_set
A_ : int = pytorch_quantization.utils.reduce_amax(mod.weight , axis=lowerCamelCase__ , keepdims=lowerCamelCase__ ).detach()
logger.info(f'RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}' )
A_ : str = amax
def a ( lowerCamelCase__ , lowerCamelCase__=25 , lowerCamelCase__=1_80 , lowerCamelCase__=None ):
'''simple docstring'''
if ignore is None:
A_ : int = []
elif not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Union[str, Any] = [ignore]
A_ : Optional[Any] = 0
for name, mod in model.named_modules():
if not hasattr(lowerCamelCase__ , """weight""" ):
continue
A_ : List[str] = max(lowerCamelCase__ , len(lowerCamelCase__ ) )
for name, mod in model.named_modules():
A_ : Tuple = getattr(lowerCamelCase__ , """_input_quantizer""" , lowerCamelCase__ )
A_ : List[Any] = getattr(lowerCamelCase__ , """_weight_quantizer""" , lowerCamelCase__ )
if not hasattr(lowerCamelCase__ , """weight""" ):
continue
if type(lowerCamelCase__ ) in ignore:
continue
if [True for s in ignore if type(lowerCamelCase__ ) is str and s in name]:
continue
A_ : Optional[int] = f'Act:{input_q.extra_repr()}'
A_ : Dict = f'Wgt:{weight_q.extra_repr()}'
A_ : List[Any] = f'{name:{name_width}} {act_str} {wgt_str}'
if len(lowerCamelCase__ ) <= line_width:
logger.info(lowerCamelCase__ )
else:
logger.info(f'{name:{name_width}} {act_str}' )
logger.info(f'{" ":{name_width}} {wgt_str}' )
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : int = 0
for name, mod in model.named_modules():
if isinstance(lowerCamelCase__ , pytorch_quantization.nn.TensorQuantizer ):
print(f'{name:80} {mod}' )
count += 1
print(f'{count} TensorQuantizers found in model' )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = getattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
if quantizer_mod is not None:
assert hasattr(lowerCamelCase__ , lowerCamelCase__ )
setattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
else:
logger.warning(f'{name} has no {quantizer}' )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="both" , **lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = f'Warning: changing {which} quantizers of {name:{qname_width}}'
for k, v in kwargs.items():
s += f' {k}={v}'
if which in ["input", "both"]:
set_quantizer(lowerCamelCase__ , lowerCamelCase__ , """_input_quantizer""" , lowerCamelCase__ , lowerCamelCase__ )
if which in ["weight", "both"]:
set_quantizer(lowerCamelCase__ , lowerCamelCase__ , """_weight_quantizer""" , lowerCamelCase__ , lowerCamelCase__ )
logger.info(lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(lowerCamelCase__ , """_input_quantizer""" ) or hasattr(lowerCamelCase__ , """_weight_quantizer""" ):
for n in names:
if re.search(lowerCamelCase__ , lowerCamelCase__ ):
set_quantizers(lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ )
elif name.endswith("""_quantizer""" ):
for n in names:
if re.search(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Dict = f'Warning: changing {name:{name_width}}'
for k, v in kwargs.items():
s += f' {k}={v}'
setattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
logger.info(lowerCamelCase__ ) | 667 | 0 |
'''Rabin-Miller probabilistic primality test and large-prime generation.'''
import random
def rabin_miller(num: int) -> bool:
    '''Run five rounds of the Miller-Rabin probabilistic primality test.'''
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5 ):
        a = random.randrange(2 , num - 1 )
        v = pow(a , s , num )
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
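# Each Miller-Rabin round wrongly passes a composite with probability at most
# 1/4, so the five rounds above bound the false-positive rate by 4**-5 (< 0.1%).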
def is_prime_low_num(num: int) -> bool:
    '''Quick check against small primes before falling back to rabin_miller().'''
    if num < 2:
        return False
    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
1_01,
1_03,
1_07,
1_09,
1_13,
1_27,
1_31,
1_37,
1_39,
1_49,
1_51,
1_57,
1_63,
1_67,
1_73,
1_79,
1_81,
1_91,
1_93,
1_97,
1_99,
2_11,
2_23,
2_27,
2_29,
2_33,
2_39,
2_41,
2_51,
2_57,
2_63,
2_69,
2_71,
2_77,
2_81,
2_83,
2_93,
3_07,
3_11,
3_13,
3_17,
3_31,
3_37,
3_47,
3_49,
3_53,
3_59,
3_67,
3_73,
3_79,
3_83,
3_89,
3_97,
4_01,
4_09,
4_19,
4_21,
4_31,
4_33,
4_39,
4_43,
4_49,
4_57,
4_61,
4_63,
4_67,
4_79,
4_87,
4_91,
4_99,
5_03,
5_09,
5_21,
5_23,
5_41,
5_47,
5_57,
5_63,
5_69,
5_71,
5_77,
5_87,
5_93,
5_99,
6_01,
6_07,
6_13,
6_17,
6_19,
6_31,
6_41,
6_43,
6_47,
6_53,
6_59,
6_61,
6_73,
6_77,
6_83,
6_91,
7_01,
7_09,
7_19,
7_27,
7_33,
7_39,
7_43,
7_51,
7_57,
7_61,
7_69,
7_73,
7_87,
7_97,
8_09,
8_11,
8_21,
8_23,
8_27,
8_29,
8_39,
8_53,
8_57,
8_59,
8_63,
8_77,
8_81,
8_83,
8_87,
9_07,
9_11,
9_19,
9_29,
9_37,
9_41,
9_47,
9_53,
9_67,
9_71,
9_77,
9_83,
9_91,
9_97,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
    return rabin_miller(num )
def generate_large_prime(keysize: int = 10_24) -> int:
    '''Sample random keysize-bit integers until one passes the primality checks.'''
    while True:
        num = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
        if is_prime_low_num(num ):
            return num
if __name__ == "__main__":
    num = generate_large_prime()
print(("Prime number:", num))
print(("is_prime_low_num:", is_prime_low_num(num)))
| 48 |
'''Tests for AutoTokenizer and the tokenizer auto-mapping.'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : List[Any] = 0
    @slow
    def test_tokenizer_from_pretrained(self):
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(tokenizer), 0)
        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (GPT2Tokenizer, GPT2TokenizerFast))
            self.assertGreater(len(tokenizer), 0)
    def test_tokenizer_from_model_type(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)
    def test_tokenizer_from_pretrained_identifier(self):
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 20)
    def test_tokenizer_from_tokenizer_class(self):
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER, config=config)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)
    def test_tokenizer_from_type(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert", use_fast=False)
            self.assertIsInstance(tokenizer, BertTokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2", use_fast=False)
            self.assertIsInstance(tokenizer, GPT2Tokenizer)
    @require_tokenizers
    def test_tokenizer_from_type_fast(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert")
            self.assertIsInstance(tokenizer, BertTokenizerFast)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2")
            self.assertIsInstance(tokenizer, GPT2TokenizerFast)
def _a (self ):
with pytest.raises(lowercase ):
AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" )
@require_tokenizers
def _a (self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
A_ : str = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
if isinstance(lowercase , lowercase ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , lowercase )
else:
self.assertEqual(tokenizer.do_lower_case , lowercase )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def _a (self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
lowercase , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ):
A_ : int = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" )
def _a (self ):
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
A_ : List[str] = TOKENIZER_MAPPING.values()
A_ : Optional[Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(lowercase )
@require_tokenizers
def _a (self ):
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=lowercase ) , lowercase )
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , lowercase )
@require_tokenizers
def _a (self ):
A_ : str = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=lowercase )
A_ : List[Any] = """Hello, world. How are you?"""
A_ : List[Any] = tokenizer.tokenize(lowercase )
self.assertEqual("""[UNK]""" , tokens[0] )
A_ : Dict = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=lowercase )
A_ : List[Any] = tokenizer.tokenize(lowercase )
self.assertEqual("""[UNK]""" , tokens[0] )
@require_tokenizers
def _a (self ):
A_ : Optional[int] = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" )
self.assertEqual(type(lowercase ) , lowercase )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 30000 )
self.assertEqual(tokenizer.unk_token , """[UNK]""" )
self.assertEqual(tokenizer.padding_side , """right""" )
self.assertEqual(tokenizer.truncation_side , """right""" )
    def test_auto_tokenizer_from_local_folder(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            tokenizer2 = AutoTokenizer.from_pretrained(tmp_dir)
        self.assertIsInstance(tokenizer2, tokenizer.__class__)
        self.assertEqual(tokenizer2.vocab_size, 12)
def _a (self ):
A_ : Union[str, Any] = AutoTokenizer.from_pretrained("""ctrl""" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(lowercase , lowercase )
def _a (self ):
# Check we can load the tokenizer config of an online model.
A_ : Tuple = get_tokenizer_config("""bert-base-cased""" )
A_ : Any = config.pop("""_commit_hash""" , lowercase )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(lowercase , {"""do_lower_case""": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
A_ : List[Any] = get_tokenizer_config(lowercase )
self.assertDictEqual(lowercase , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
A_ : int = AutoTokenizer.from_pretrained(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : Dict = get_tokenizer_config(lowercase )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" )
def _a (self ):
try:
AutoConfig.register("""custom""" , lowercase )
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase ):
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
A_ : Tuple = CustomTokenizer.from_pretrained(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : List[str] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def _a (self ):
try:
AutoConfig.register("""custom""" , lowercase )
# Can register in two steps
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
lowercase , slow_tokenizer_class=lowercase , fast_tokenizer_class=lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase ):
AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
            # We pass through a BertTokenizerFast because there is no slow-to-fast converter for our new
            # tokenizer and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
A_ : str = BertTokenizerFast.from_pretrained(lowercase )
bert_tokenizer.save_pretrained(lowercase )
A_ : Optional[Any] = CustomTokenizerFast.from_pretrained(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : List[str] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase , use_fast=lowercase )
self.assertIsInstance(lowercase , lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _a (self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase ):
A_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase ):
A_ : Any = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
A_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : int = AutoTokenizer.from_pretrained(lowercase , trust_remote_code=lowercase )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
A_ : str = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : Any = AutoTokenizer.from_pretrained(lowercase , trust_remote_code=lowercase , use_fast=lowercase )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
@require_tokenizers
def _a (self ):
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Dict = False
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : str = NewTokenizer
__SCREAMING_SNAKE_CASE : Optional[Any] = False
try:
AutoConfig.register("""custom""" , lowercase )
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
# If remote code is not set, the default is to use local
A_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
A_ : int = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
A_ : int = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
A_ : List[Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
A_ : Any = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertTrue(tokenizer.special_attribute_present )
A_ : Union[str, Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _a (self ):
A_ : Dict = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
A_ : Optional[int] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def _a (self ):
with self.assertRaisesRegex(
lowercase , """bert-base is not a local folder and is not a valid model identifier""" ):
A_ : List[str] = AutoTokenizer.from_pretrained("""bert-base""" )
def _a (self ):
with self.assertRaisesRegex(
lowercase , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
A_ : Tuple = AutoTokenizer.from_pretrained(lowercase , revision="""aaaaaa""" )
def _a (self ):
# Make sure we have cached the tokenizer.
A_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
A_ : Tuple = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 ) | 667 | 0 |
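# A minimal sketch of the registration API exercised by the tests above, as it
# would appear in application code. MyConfig/MyTokenizer are hypothetical names
# of my own; a real tokenizer would implement the vocab and encoding methods.
from transformers import AutoConfig, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer


class MyConfig(PretrainedConfig):
    model_type = "my-model"


class MyTokenizer(PreTrainedTokenizer):
    pass


AutoConfig.register("my-model", MyConfig)
AutoTokenizer.register(MyConfig, slow_tokenizer_class=MyTokenizer)
# AutoTokenizer.from_pretrained(...) now resolves any checkpoint whose config
# declares model_type == "my-model" to MyTokenizer.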
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available

_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 49 |
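# A stripped-down sketch (my own illustration, not transformers' actual
# implementation) of what the _LazyModule indirection above buys: the heavy
# submodule import is deferred until an exported attribute is first accessed.
import importlib
import types


class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map exported attribute -> submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value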
'''simple docstring'''
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """
    Divide number_of_bytes into the given number of contiguous byte ranges
    (e.g. for a multi-part download), one range string per partition.

    >>> allocation_num(16647, 4)
    ['1-4161', '4162-8322', '8323-12483', '12484-16647']
    """
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod() | 667 | 0 |
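# Hedged usage sketch (my addition): each partition maps naturally onto an HTTP
# Range header for a parallel download. The URL and the use of `requests` are
# illustrative only; the module itself just computes the ranges.
import requests

url = "https://example.com/big.bin"  # hypothetical file
size = int(requests.head(url).headers["Content-Length"])
for byte_range in allocation_num(size, 4):
    start, end = (int(x) for x in byte_range.split("-"))
    # the returned ranges are 1-based while HTTP ranges are 0-based
    chunk = requests.get(url, headers={"Range": f"bytes={start - 1}-{end - 1}"}).content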
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
'''simple docstring'''
def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=13 ,_lowerCAmelCase=32 ,_lowerCAmelCase=2 ,_lowerCAmelCase=3 ,_lowerCAmelCase=6_40 ,_lowerCAmelCase=4 ,_lowerCAmelCase="silu" ,_lowerCAmelCase=3 ,_lowerCAmelCase=32 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=10 ,_lowerCAmelCase=None ,):
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = image_size
lowerCamelCase__ = patch_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = last_hidden_size
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = hidden_act
lowerCamelCase__ = conv_kernel_size
lowerCamelCase__ = output_stride
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = classifier_dropout_prob
lowerCamelCase__ = use_labels
lowerCamelCase__ = is_training
lowerCamelCase__ = num_labels
lowerCamelCase__ = initializer_range
lowerCamelCase__ = scope
def UpperCamelCase_ ( self ):
lowerCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ = None
lowerCamelCase__ = None
if self.use_labels:
lowerCamelCase__ = ids_tensor([self.batch_size] ,self.num_labels )
lowerCamelCase__ = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
lowerCamelCase__ = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase_ ( self ):
return MobileViTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,num_attention_heads=self.num_attention_heads ,hidden_act=self.hidden_act ,conv_kernel_size=self.conv_kernel_size ,output_stride=self.output_stride ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,classifier_dropout_prob=self.classifier_dropout_prob ,initializer_range=self.initializer_range ,)
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = MobileViTModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowerCamelCase__ = model(_lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape ,(
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = MobileViTForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowerCamelCase__ = model(_lowerCAmelCase ,labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = MobileViTForSemanticSegmentation(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowerCamelCase__ = model(_lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape ,(
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
lowerCamelCase__ = model(_lowerCAmelCase ,labels=_lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape ,(
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = config_and_inputs
lowerCamelCase__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)
def UpperCamelCase_ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViT does not use inputs_embeds""" )
def UpperCamelCase_ ( self ):
pass
@unittest.skip(reason="""MobileViT does not support input and output embeddings""" )
def UpperCamelCase_ ( self ):
pass
@unittest.skip(reason="""MobileViT does not output attentions""" )
def UpperCamelCase_ ( self ):
pass
def UpperCamelCase_ ( self ):
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(_lowerCAmelCase )
lowerCamelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ = [*signature.parameters.keys()]
lowerCamelCase__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,_lowerCAmelCase )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase_ ( self ):
pass
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def UpperCamelCase_ ( self ):
def check_hidden_states_output(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
lowerCamelCase__ = model(**self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) )
lowerCamelCase__ = outputs.hidden_states
lowerCamelCase__ = 5
self.assertEqual(len(_lowerCAmelCase ) ,_lowerCAmelCase )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
lowerCamelCase__ = 2
for i in range(len(_lowerCAmelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) ,[self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] ,)
divisor *= 2
self.assertEqual(self.model_tester.output_stride ,divisor // 2 )
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ = True
check_hidden_states_output(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ = True
check_hidden_states_output(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_lowerCAmelCase )
@slow
def UpperCamelCase_ ( self ):
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = MobileViTModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self ):
return MobileViTImageProcessor.from_pretrained("""apple/mobilevit-xx-small""" ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self ):
lowerCamelCase__ = MobileViTForImageClassification.from_pretrained("""apple/mobilevit-xx-small""" ).to(_lowerCAmelCase )
lowerCamelCase__ = self.default_image_processor
lowerCamelCase__ = prepare_img()
lowerCamelCase__ = image_processor(images=_lowerCAmelCase ,return_tensors="""pt""" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
lowerCamelCase__ = model(**_lowerCAmelCase )
# verify the logits
lowerCamelCase__ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape ,_lowerCAmelCase )
lowerCamelCase__ = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_lowerCAmelCase ,atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ):
lowerCamelCase__ = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
lowerCamelCase__ = model.to(_lowerCAmelCase )
lowerCamelCase__ = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
lowerCamelCase__ = prepare_img()
lowerCamelCase__ = image_processor(images=_lowerCAmelCase ,return_tensors="""pt""" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
lowerCamelCase__ = model(**_lowerCAmelCase )
lowerCamelCase__ = outputs.logits
# verify the logits
lowerCamelCase__ = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape ,_lowerCAmelCase )
lowerCamelCase__ = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] ,device=_lowerCAmelCase ,)
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] ,_lowerCAmelCase ,atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ):
lowerCamelCase__ = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
lowerCamelCase__ = model.to(_lowerCAmelCase )
lowerCamelCase__ = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
lowerCamelCase__ = prepare_img()
lowerCamelCase__ = image_processor(images=_lowerCAmelCase ,return_tensors="""pt""" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
lowerCamelCase__ = model(**_lowerCAmelCase )
lowerCamelCase__ = outputs.logits.detach().cpu()
lowerCamelCase__ = image_processor.post_process_semantic_segmentation(outputs=_lowerCAmelCase ,target_sizes=[(50, 60)] )
lowerCamelCase__ = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape ,_lowerCAmelCase )
lowerCamelCase__ = image_processor.post_process_semantic_segmentation(outputs=_lowerCAmelCase )
lowerCamelCase__ = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape ,_lowerCAmelCase )
| 50 |
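# The integration tests above double as a usage recipe; outside a test, the
# classification path looks like this (a sketch assuming the same
# apple/mobilevit-xx-small checkpoint and sample image used by the tests).
import torch
from PIL import Image
from transformers import MobileViTForImageClassification, MobileViTImageProcessor

processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small")
model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[int(logits.argmax(-1))])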
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    """Build the list of (original key, HuggingFace key) pairs for the encoder weights."""
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm1.weight', f'encoder.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm1.bias', f'encoder.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.weight', f'encoder.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.bias', f'encoder.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm2.weight', f'encoder.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm2.bias', f'encoder.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.weight', f'encoder.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.bias', f'encoder.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc2.weight', f'encoder.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.mlp.fc2.bias', f'encoder.encoder.layer.{i}.output.dense.bias') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    """Split the fused qkv projection of each encoder layer into query/key/value weights."""
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")

        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]


def rename_key(dct, old, new):
    """Pop a key from the state dict and reinsert its value under the new name."""
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    """Fetch a sample image (handwritten or printed) matching the checkpoint domain."""
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Copy/paste/tweak the original TrOCR weights into our VisionEncoderDecoderModel structure."""
    # define encoder and decoder configs based on the checkpoint URL
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 667 | 0 |
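# Once converted, the model is driven through the standard generate API. A
# sketch using the published microsoft/trocr-base-handwritten checkpoint
# rather than a local conversion; the input image path is hypothetical.
from PIL import Image
from transformers import TrOCRProcessor, VisionEncoderDecoderModel

processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")
model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten")

image = Image.open("line_of_handwriting.png").convert("RGB")
pixel_values = processor(images=image, return_tensors="pt").pixel_values
generated_ids = model.generate(pixel_values)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])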
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )
        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turtle", generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle", prior_num_inference_steps=2, num_inference_steps=2, output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 51 |
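# For completeness, the same pipeline outside the test suite. A sketch: the
# fusing/stable-unclip-2-1-l checkpoint and the memory-saving calls are taken
# straight from the integration tests above; a CUDA device is assumed.
import torch
from diffusers import StableUnCLIPPipeline

pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
pipe = pipe.to("cuda")
pipe.enable_attention_slicing()  # trade a little speed for lower VRAM

image = pipe("anime turtle", num_inference_steps=20).images[0]
image.save("turtle.png")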
'''simple docstring'''
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))''')) | 667 | 0 |
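# How the quine above works, unrolled (my addition): %% collapses to % and %r
# substitutes repr(template) into the template, rebuilding the source exactly.
template = 'print((lambda quine: quine %% quine)(%r))'
source = template % template
# source is character-for-character the original one-liner, so executing it
# prints itself again.
print(source)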
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float valued nested list of the given 2-D shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
    def __init__(
        self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, padding_value=0.0, sampling_rate=16000, do_normalize=True, num_mel_bins=80, hop_length=16, win_length=64, win_function="hann_window", fmin=80, fmax=7600, mel_floor=1e-10, return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask
def _lowerCamelCase ( self ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def _lowerCamelCase ( self , _UpperCAmelCase=False , _UpperCAmelCase=False ):
def _flatten(_UpperCAmelCase ):
return list(itertools.chain(*_UpperCAmelCase ) )
if equal_length:
__a : Optional[int] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
__a : Optional[int] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__a : Any = [np.asarray(_UpperCAmelCase ) for x in speech_inputs]
return speech_inputs
def _lowerCamelCase ( self , _UpperCAmelCase=False , _UpperCAmelCase=False ):
if equal_length:
__a : int = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__a : Optional[int] = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__a : List[Any] = [np.asarray(_UpperCAmelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = SpeechTaFeatureExtractor
def _lowerCamelCase ( self ):
__a : Union[str, Any] = SpeechTaFeatureExtractionTester(self )
def _lowerCamelCase ( self , _UpperCAmelCase ):
self.assertTrue(np.all(np.mean(_UpperCAmelCase , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(_UpperCAmelCase , axis=0 ) - 1 ) < 1e-3 ) )
def _lowerCamelCase ( self ):
# Tests that all call wrap to encode_plus and batch_encode_plus
__a : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__a : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__a : Union[str, Any] = [np.asarray(_UpperCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
__a : str = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
__a : Dict = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-3 ) )
# Test batched
__a : Any = feat_extract(_UpperCAmelCase , return_tensors='''np''' ).input_values
__a : List[Any] = feat_extract(_UpperCAmelCase , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-3 ) )
def _lowerCamelCase ( self ):
__a : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__a : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__a : Optional[Any] = ['''longest''', '''max_length''', '''do_not_pad''']
__a : Tuple = [None, 1600, None]
for max_length, padding in zip(_UpperCAmelCase , _UpperCAmelCase ):
__a : Union[str, Any] = feat_extract(_UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors='''np''' )
__a : str = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def _lowerCamelCase ( self ):
__a : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__a : Any = range(800 , 1400 , 200 )
__a : Dict = [floats_list((1, x) )[0] for x in lengths]
__a : int = ['''longest''', '''max_length''', '''do_not_pad''']
__a : Any = [None, 1600, None]
for max_length, padding in zip(_UpperCAmelCase , _UpperCAmelCase ):
__a : int = feat_extract(_UpperCAmelCase , max_length=_UpperCAmelCase , padding=_UpperCAmelCase )
__a : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def _lowerCamelCase ( self ):
__a : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__a : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__a : int = feat_extract(
_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=1000 , padding='''max_length''' , return_tensors='''np''' )
__a : List[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def _lowerCamelCase ( self ):
__a : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__a : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__a : List[str] = feat_extract(
_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=1000 , padding='''longest''' , return_tensors='''np''' )
__a : List[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
__a : List[str] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__a : Optional[int] = feat_extract(
_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=2000 , padding='''longest''' , return_tensors='''np''' )
__a : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
def _lowerCamelCase ( self ):
__a : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__a : Optional[int] = np.random.rand(100 ).astype(np.floataa )
__a : Any = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__a : Any = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
__a : str = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def _lowerCamelCase ( self ):
# Tests that all call wrap to encode_plus and batch_encode_plus
__a : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__a : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__a : Tuple = [np.asarray(_UpperCAmelCase ) for speech_input in speech_inputs]
# Test feature size
__a : Union[str, Any] = feature_extractor(audio_target=_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors='''np''' ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
__a : Tuple = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_values
__a : int = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-3 ) )
# Test batched
__a : Union[str, Any] = feature_extractor(_UpperCAmelCase , return_tensors='''np''' ).input_values
__a : Union[str, Any] = feature_extractor(_UpperCAmelCase , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
__a : Any = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__a : str = np.asarray(_UpperCAmelCase )
__a : List[str] = feature_extractor(_UpperCAmelCase , return_tensors='''np''' ).input_values
__a : str = feature_extractor(_UpperCAmelCase , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-3 ) )
def _lowerCamelCase ( self ):
__a : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_target()
__a : int = self.feature_extraction_class(**self.feat_extract_dict )
__a : Tuple = feat_extract.model_input_names[0]
__a : int = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_UpperCAmelCase ) == len(_UpperCAmelCase ) for x, y in zip(_UpperCAmelCase , processed_features[input_name] ) ) )
__a : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_UpperCAmelCase )
__a : List[str] = BatchFeature({input_name: speech_inputs} , tensor_type='''np''' )
__a : Any = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__a : str = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def _lowerCamelCase ( self ):
__a : str = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_UpperCAmelCase )
__a : Dict = self.feature_extraction_class(**self.feat_extract_dict )
__a : List[str] = feat_extract.model_input_names[0]
__a : Dict = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' )
__a : List[Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__a : List[str] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def _lowerCamelCase ( self ):
__a : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
__a : str = self.feat_extract_tester.prepare_inputs_for_target()
__a : Tuple = feat_extract.model_input_names[0]
__a : Optional[int] = BatchFeature({input_name: speech_inputs} )
__a : int = feat_extract.num_mel_bins # hack!
__a : Any = feat_extract.pad(_UpperCAmelCase , padding='''longest''' , return_tensors='''np''' )[input_name]
__a : Optional[int] = feat_extract.pad(_UpperCAmelCase , padding='''longest''' , return_tensors='''pt''' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def _lowerCamelCase ( self ):
__a : Union[str, Any] = self.feat_extract_dict
__a : str = True
__a : Dict = self.feature_extraction_class(**_UpperCAmelCase )
__a : int = self.feat_extract_tester.prepare_inputs_for_target()
__a : Optional[Any] = [len(_UpperCAmelCase ) for x in speech_inputs]
__a : Any = feat_extract.model_input_names[0]
__a : Dict = BatchFeature({input_name: speech_inputs} )
__a : Any = feat_extract.num_mel_bins # hack!
__a : Tuple = feat_extract.pad(_UpperCAmelCase , padding='''longest''' , return_tensors='''np''' )
self.assertIn('''attention_mask''' , _UpperCAmelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _UpperCAmelCase )
    def test_attention_mask_with_truncation_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))
    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
from __future__ import annotations


def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum obtainable from `nums` without picking two adjacent elements."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
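    # A quick worked check (illustrative input, not part of the original file):
    # the best non-adjacent picks in [1, 5, 3, 7, 2, 2, 6] are 5 + 7 + 6 = 18.
    assert maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6]) == 18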
from importlib import import_module

from .logging import get_logger


logger = get_logger(__name__)


class _PatchedModuleObj:
    """Set all the module's components as attributes of the _PatchedModuleObj object."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module


class patch_submodule:
    """Patch a submodule attribute of an object, keeping all other submodules intact."""

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")

    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None

        return self.__exit__()
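# Illustrative usage (module and attribute names below are hypothetical): as a
# context manager the patch lives only inside the `with` block, while
# start()/stop() keep it active across calls.
#
#   import my_module  # hypothetical module that did `from os.path import join`
#
#   def fake_join(*parts):
#       return "/".join(parts)
#
#   with patch_submodule(my_module, "os.path.join", fake_join):
#       ...  # my_module sees fake_join, however it imported os.path.join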
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
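# Worked example (illustrative numbers, not from the original file): with the
# default scale_factor=8 and height=760, 760 // 64 = 11 with a remainder, so
# new_height = 12 and the function returns 12 * 8 = 96 -- the latent-grid size
# that the 8x movq decoder turns back into a 768-pixel image.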
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

            if output_type == "pil":
                image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Sequence, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
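# Illustrative usage (column names below are hypothetical): the template records
# which dataset columns play each role, and `column_mapping` tells consumers how
# to rename them back to the canonical "question"/"context"/"answers" schema.
#
#   template = QuestionAnsweringExtractive(question_column="q", context_column="passage")
#   template.column_mapping  # {"q": "question", "passage": "context", "answers": "answers"}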
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))
    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]
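    # Illustrative call (the label strings depend on the checkpoint's id2label
    # map, so treat these as placeholders):
    #   class_ids = pipe.get_label_ids(["white shark", "umbrella"])
    #   images = pipe(class_labels=class_ids, num_inference_steps=25).images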
    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)

            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)

            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample
        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
def solution(limit: int = 1_000_000) -> int:
    """Return the number of reduced proper fractions n/d with d <= limit,
    i.e. the sum of Euler's totient phi(d) for 2 <= d <= limit."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
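    # Sanity check against the classic small case: for limit = 8 the count is
    # phi(2) + ... + phi(8) = 1 + 2 + 2 + 4 + 2 + 6 + 4 = 21 reduced fractions.
    print(f"{solution(8) = }")  # expected: solution(8) = 21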
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    """Expected number of distinct colours when `num_picked` of the
    NUM_BALLS balls (BALLS_PER_COLOUR of each colour) are drawn."""
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)

    result = NUM_COLOURS * (1 - missing_colour / total)

    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
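# Why the closed form works (a short derivation, not in the original file): by
# linearity of expectation, E[#colours seen] = NUM_COLOURS * P(a given colour
# appears), and a given colour is entirely absent from the draw with probability
# math.comb(NUM_BALLS - BALLS_PER_COLOUR, 20) / math.comb(NUM_BALLS, 20).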
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch
from torch import nn

from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging


logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    """Wrap multiple ControlNetModels so they can be used as one model; the per-controlnet residuals are summed."""

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )
            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)
            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase :List[Any] = logging.get_logger(__name__)
lowerCamelCase :Union[str, Any] = {
'''google/pix2struct-textcaps-base''': (
'''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'''
),
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'pix2struct_text_model'
__SCREAMING_SNAKE_CASE : Optional[int] = ['past_key_values']
__SCREAMING_SNAKE_CASE : List[Any] = {
'hidden_size': 'hidden_size',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__(self , lowercase=50244 , lowercase=768 , lowercase=64 , lowercase=2048 , lowercase=12 , lowercase=12 , lowercase=32 , lowercase=128 , lowercase=0.1 , lowercase=1E-6 , lowercase=1.0 , lowercase="gelu_new" , lowercase=0 , lowercase=False , lowercase=0 , lowercase=1 , lowercase=False , lowercase=True , **lowercase , ):
A_ : Tuple = vocab_size
A_ : str = hidden_size
A_ : Optional[Any] = d_kv
A_ : Tuple = d_ff
A_ : str = num_layers
A_ : int = num_heads
A_ : Dict = relative_attention_num_buckets
A_ : Optional[Any] = relative_attention_max_distance
A_ : Dict = dropout_rate
A_ : Optional[int] = layer_norm_epsilon
A_ : Dict = initializer_factor
A_ : Any = use_cache
A_ : int = eos_token_id
A_ : Tuple = decoder_start_token_id
# for backwards compatibility
A_ : str = dense_act_fn
super().__init__(
pad_token_id=lowercase , eos_token_id=lowercase , decoder_start_token_id=lowercase , tie_word_embeddings=lowercase , is_decoder=lowercase , **lowercase , )
@classmethod
def _a (cls , lowercase , **lowercase ):
cls._set_token_in_kwargs(lowercase )
A_, A_ : List[str] = cls.get_config_dict(lowercase , **lowercase )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
A_ : int = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(lowercase , **lowercase )
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : int = 'pix2struct_vision_model'
def __init__(self , lowercase=768 , lowercase=768 , lowercase=2048 , lowercase=64 , lowercase=12 , lowercase=12 , lowercase="gelu_new" , lowercase=1E-6 , lowercase=0.0 , lowercase=0.0 , lowercase=1E-10 , lowercase=1.0 , lowercase=4096 , lowercase=32 , lowercase=128 , **lowercase , ):
super().__init__(**lowercase )
A_ : List[str] = hidden_size
A_ : Optional[int] = patch_embed_hidden_size
A_ : Any = d_ff
A_ : str = dropout_rate
A_ : Dict = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : List[Any] = initializer_range
A_ : List[str] = initializer_factor
A_ : Dict = attention_dropout
A_ : Optional[Any] = layer_norm_eps
A_ : Optional[Any] = dense_act_fn
A_ : List[Any] = seq_len
A_ : Tuple = relative_attention_num_buckets
A_ : Any = relative_attention_max_distance
A_ : int = d_kv
@classmethod
def _a (cls , lowercase , **lowercase ):
cls._set_token_in_kwargs(lowercase )
A_, A_ : List[Any] = cls.get_config_dict(lowercase , **lowercase )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
A_ : Tuple = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(lowercase , **lowercase )
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Any = 'pix2struct'
__SCREAMING_SNAKE_CASE : List[Any] = True
def __init__(self , lowercase=None , lowercase=None , lowercase=1.0 , lowercase=0.02 , lowercase=False , lowercase=False , lowercase=True , **lowercase , ):
super().__init__(tie_word_embeddings=lowercase , is_encoder_decoder=lowercase , **lowercase )
if text_config is None:
A_ : Optional[Any] = {}
logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""" )
if vision_config is None:
A_ : Tuple = {}
logger.info("""vision_config is None. Initializing the Pix2StructVisionConfig with default values.""" )
A_ : Tuple = PixaStructTextConfig(**lowercase )
A_ : List[str] = PixaStructVisionConfig(**lowercase )
A_ : Dict = self.text_config.decoder_start_token_id
A_ : Union[str, Any] = self.text_config.pad_token_id
A_ : str = self.text_config.eos_token_id
A_ : List[str] = initializer_factor
A_ : int = initializer_range
A_ : Tuple = self.initializer_range
A_ : Tuple = self.initializer_range
A_ : List[str] = is_vqa
@classmethod
def _a (cls , lowercase , lowercase , **lowercase ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase )
def _a (self ):
A_ : Optional[Any] = copy.deepcopy(self.__dict__ )
A_ : str = self.text_config.to_dict()
A_ : List[Any] = self.vision_config.to_dict()
A_ : List[str] = self.__class__.model_type
return output | 667 | 0 |
"""simple docstring"""
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 0.0
_lowerCamelCase = 1
_lowerCamelCase = 1
_lowerCamelCase = True
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = jnp.floataa
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[int] = []
snake_case_ : Optional[int] = []
for i in range(self.num_layers ):
snake_case_ : List[str] = self.in_channels if i == 0 else self.out_channels
snake_case_ : Optional[int] = FlaxResnetBlockaD(
in_channels=_lowercase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_lowercase )
snake_case_ : Any = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(_lowercase )
snake_case_ : Any = resnets
snake_case_ : Any = attentions
if self.add_downsample:
snake_case_ : Any = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , _lowercase , _lowercase , _lowercase , _lowercase=True ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Union[str, Any] = ()
for resnet, attn in zip(self.resnets , self.attentions ):
snake_case_ : List[Any] = resnet(_lowercase , _lowercase , deterministic=_lowercase )
snake_case_ : Optional[int] = attn(_lowercase , _lowercase , deterministic=_lowercase )
output_states += (hidden_states,)
if self.add_downsample:
snake_case_ : List[Any] = self.downsamplers_a(_lowercase )
output_states += (hidden_states,)
return hidden_states, output_states
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 0.0
_lowerCamelCase = 1
_lowerCamelCase = True
_lowerCamelCase = jnp.floataa
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : int = []
for i in range(self.num_layers ):
snake_case_ : int = self.in_channels if i == 0 else self.out_channels
snake_case_ : Optional[Any] = FlaxResnetBlockaD(
in_channels=_lowercase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_lowercase )
snake_case_ : Dict = resnets
if self.add_downsample:
snake_case_ : Any = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , _lowercase , _lowercase , _lowercase=True ) -> List[Any]:
'''simple docstring'''
snake_case_ : Dict = ()
for resnet in self.resnets:
snake_case_ : List[Any] = resnet(_lowercase , _lowercase , deterministic=_lowercase )
output_states += (hidden_states,)
if self.add_downsample:
snake_case_ : Union[str, Any] = self.downsamplers_a(_lowercase )
output_states += (hidden_states,)
return hidden_states, output_states
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 0.0
_lowerCamelCase = 1
_lowerCamelCase = 1
_lowerCamelCase = True
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = jnp.floataa
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : List[Any] = []
snake_case_ : Tuple = []
for i in range(self.num_layers ):
snake_case_ : Dict = self.in_channels if (i == self.num_layers - 1) else self.out_channels
snake_case_ : Tuple = self.prev_output_channel if i == 0 else self.out_channels
snake_case_ : str = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_lowercase )
snake_case_ : Optional[int] = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(_lowercase )
snake_case_ : Union[str, Any] = resnets
snake_case_ : Any = attentions
if self.add_upsample:
snake_case_ : Tuple = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=True ) -> Union[str, Any]:
'''simple docstring'''
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
snake_case_ : Union[str, Any] = res_hidden_states_tuple[-1]
snake_case_ : Optional[Any] = res_hidden_states_tuple[:-1]
snake_case_ : List[Any] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
snake_case_ : List[str] = resnet(_lowercase , _lowercase , deterministic=_lowercase )
snake_case_ : int = attn(_lowercase , _lowercase , deterministic=_lowercase )
if self.add_upsample:
snake_case_ : List[Any] = self.upsamplers_a(_lowercase )
return hidden_states
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 0.0
_lowerCamelCase = 1
_lowerCamelCase = True
_lowerCamelCase = jnp.floataa
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Union[str, Any] = []
for i in range(self.num_layers ):
snake_case_ : int = self.in_channels if (i == self.num_layers - 1) else self.out_channels
snake_case_ : List[str] = self.prev_output_channel if i == 0 else self.out_channels
snake_case_ : Optional[int] = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_lowercase )
snake_case_ : Optional[int] = resnets
if self.add_upsample:
snake_case_ : str = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , _lowercase , _lowercase , _lowercase , _lowercase=True ) -> Optional[int]:
'''simple docstring'''
for resnet in self.resnets:
# pop res hidden states
snake_case_ : Optional[Any] = res_hidden_states_tuple[-1]
snake_case_ : List[str] = res_hidden_states_tuple[:-1]
snake_case_ : str = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
snake_case_ : Optional[int] = resnet(_lowercase , _lowercase , deterministic=_lowercase )
if self.add_upsample:
snake_case_ : int = self.upsamplers_a(_lowercase )
return hidden_states
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
_lowerCamelCase = 42
_lowerCamelCase = 0.0
_lowerCamelCase = 1
_lowerCamelCase = 1
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = jnp.floataa
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : Any = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
snake_case_ : Tuple = []
for _ in range(self.num_layers ):
snake_case_ : List[Any] = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(_lowercase )
snake_case_ : List[str] = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_lowercase )
snake_case_ : Optional[Any] = resnets
snake_case_ : List[Any] = attentions
def __call__( self , _lowercase , _lowercase , _lowercase , _lowercase=True ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Any = self.resnets[0](_lowercase , _lowercase )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
snake_case_ : Dict = attn(_lowercase , _lowercase , deterministic=_lowercase )
snake_case_ : Optional[int] = resnet(_lowercase , _lowercase , deterministic=_lowercase )
return hidden_states
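# Note on the skip-connection wiring above (summary comment, not in the original
# file): every down block appends its hidden states to `output_states`, and the
# matching up block pops them back off the end of `res_hidden_states_tuple`,
# concatenating along the channel axis before each resnet.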
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available


_import_structure = {
    "configuration_audio_spectrogram_transformer": [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ASTConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]

if TYPE_CHECKING:
    from .configuration_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ASTConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_audio_spectrogram_transformer import (
            AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ASTForAudioClassification,
            ASTModel,
            ASTPreTrainedModel,
        )

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
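# Note on the pattern (not part of the original file): replacing
# sys.modules[__name__] with a _LazyModule keeps the package import cheap;
# the heavy torch/speech submodules are only imported on first attribute access.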
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    """CLIP-style preprocessing implemented with differentiable torchvision
    transforms, so gradients can flow from the CLIP score back into the image."""

    def __init__(self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14") -> None:
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class _SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__(self : Any , UpperCAmelCase_ : List[Any]=10 , UpperCAmelCase_ : int=0.01 , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : str="image" , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : str=False , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : Optional[Any]=False , ) ->None:
'''simple docstring'''
super().__init__()
lowerCamelCase__: Optional[int] =None
lowerCamelCase__: str =device if device else get_device()
if vqgan:
lowerCamelCase__: Dict =vqgan
else:
lowerCamelCase__: Optional[Any] =load_vqgan(self.device , conf_path=UpperCAmelCase_ , ckpt_path=UpperCAmelCase_)
self.vqgan.eval()
if clip:
lowerCamelCase__: Union[str, Any] =clip
else:
lowerCamelCase__: str =CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
self.clip.to(self.device)
lowerCamelCase__: str =ProcessorGradientFlow(device=self.device)
lowerCamelCase__: Union[str, Any] =iterations
lowerCamelCase__: str =lr
lowerCamelCase__: str =log
lowerCamelCase__: str =make_grid
lowerCamelCase__: int =return_val
lowerCamelCase__: int =quantize
lowerCamelCase__: Dict =self.vqgan.decoder.z_shape
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : List[Any]=5 , UpperCAmelCase_ : Union[str, Any]=True) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Dict =[]
if output_path is None:
lowerCamelCase__: Union[str, Any] ="./animation.gif"
if input_path is None:
lowerCamelCase__: List[Any] =self.save_path
lowerCamelCase__: Any =sorted(glob(input_path + "/*"))
if not len(UpperCAmelCase_):
raise ValueError(
"No images found in save path, aborting (did you pass save_intermediate=True to the generate"
" function?)")
if len(UpperCAmelCase_) == 1:
print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
lowerCamelCase__: Tuple =total_duration / len(UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =[frame_duration] * len(UpperCAmelCase_)
if extend_frames:
lowerCamelCase__: int =1.5
lowerCamelCase__: Optional[int] =3
for file_name in paths:
if file_name.endswith(".png"):
images.append(imageio.imread(UpperCAmelCase_))
imageio.mimsave(UpperCAmelCase_ , UpperCAmelCase_ , duration=UpperCAmelCase_)
print(F"""gif saved to {output_path}""")
def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Any=None) ->Tuple:
'''simple docstring'''
if not (path or img):
raise ValueError("Input either path or tensor")
if img is not None:
raise NotImplementedError
lowerCamelCase__: str =preprocess(Image.open(UpperCAmelCase_) , target_image_size=256).to(self.device)
lowerCamelCase__: Any =preprocess_vqgan(UpperCAmelCase_)
lowerCamelCase__ , *lowerCamelCase__: str =self.vqgan.encode(UpperCAmelCase_)
return z
def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : List[Any]) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =self.latent.detach().requires_grad_()
lowerCamelCase__: List[Any] =base_latent + transform_vector
if self.quantize:
lowerCamelCase__ , *lowerCamelCase__: int =self.vqgan.quantize(UpperCAmelCase_)
else:
lowerCamelCase__: str =trans_latent
return self.vqgan.decode(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any]=None) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =self.clip_preprocessor(text=UpperCAmelCase_ , images=UpperCAmelCase_ , return_tensors="pt" , padding=UpperCAmelCase_)
lowerCamelCase__: List[str] =self.clip(**UpperCAmelCase_)
lowerCamelCase__: int =clip_outputs.logits_per_image
if weights is not None:
lowerCamelCase__: Optional[int] =similarity_logits * weights
return similarity_logits.sum()
def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : int) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Dict =self._get_clip_similarity(pos_prompts["prompts"] , UpperCAmelCase_ , weights=(1 / pos_prompts["weights"]))
if neg_prompts:
lowerCamelCase__: Tuple =self._get_clip_similarity(neg_prompts["prompts"] , UpperCAmelCase_ , weights=neg_prompts["weights"])
else:
lowerCamelCase__: List[Any] =torch.tensor([1] , device=self.device)
lowerCamelCase__: Union[str, Any] =-torch.log(UpperCAmelCase_) + torch.log(UpperCAmelCase_)
return loss
def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any]) ->int:
'''simple docstring'''
lowerCamelCase__: Dict =torch.randn_like(self.latent , requires_grad=UpperCAmelCase_ , device=self.device)
lowerCamelCase__: Any =torch.optim.Adam([vector] , lr=self.lr)
for i in range(self.iterations):
optim.zero_grad()
lowerCamelCase__: List[Any] =self._add_vector(UpperCAmelCase_)
lowerCamelCase__: Optional[int] =loop_post_process(UpperCAmelCase_)
lowerCamelCase__: str =self._get_CLIP_loss(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
print("CLIP loss" , UpperCAmelCase_)
if self.log:
wandb.log({"CLIP Loss": clip_loss})
clip_loss.backward(retain_graph=UpperCAmelCase_)
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0])
else:
yield vector
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any]) ->str:
'''simple docstring'''
wandb.init(reinit=UpperCAmelCase_ , project="face-editor")
wandb.config.update({"Positive Prompts": positive_prompts})
wandb.config.update({"Negative Prompts": negative_prompts})
wandb.config.update({"lr": self.lr, "iterations": self.iterations})
if image_path:
lowerCamelCase__: Dict =Image.open(UpperCAmelCase_)
lowerCamelCase__: str =image.resize((256, 256))
wandb.log("Original Image" , wandb.Image(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : Optional[int]) ->int:
'''simple docstring'''
if not prompts:
return []
lowerCamelCase__: Optional[Any] =[]
lowerCamelCase__: Union[str, Any] =[]
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
lowerCamelCase__: Tuple =[prompt.strip() for prompt in prompts.split("|")]
for prompt in prompts:
if isinstance(UpperCAmelCase_ , (tuple, list)):
lowerCamelCase__: Optional[Any] =prompt[0]
lowerCamelCase__: Dict =float(prompt[1])
elif ":" in prompt:
lowerCamelCase__ , lowerCamelCase__: Optional[Any] =prompt.split(":")
lowerCamelCase__: str =float(UpperCAmelCase_)
else:
lowerCamelCase__: List[str] =prompt
lowerCamelCase__: Any =1.0
processed_prompts.append(UpperCAmelCase_)
weights.append(UpperCAmelCase_)
return {
"prompts": processed_prompts,
"weights": torch.tensor(UpperCAmelCase_ , device=self.device),
}
def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Tuple=None , ) ->List[str]:
'''simple docstring'''
if image_path:
lowerCamelCase__: Any =self._get_latent(UpperCAmelCase_)
else:
lowerCamelCase__: int =torch.randn(self.latent_dim , device=self.device)
if self.log:
self._init_logging(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
assert pos_prompts, "You must provide at least one positive prompt."
lowerCamelCase__: Optional[int] =self.process_prompts(UpperCAmelCase_)
lowerCamelCase__: Dict =self.process_prompts(UpperCAmelCase_)
if save_final and save_path is None:
lowerCamelCase__: Optional[Any] =os.path.join("./outputs/" , "_".join(pos_prompts["prompts"]))
if not os.path.exists(UpperCAmelCase_):
os.makedirs(UpperCAmelCase_)
else:
lowerCamelCase__: Dict =save_path + "_" + get_timestamp()
os.makedirs(UpperCAmelCase_)
lowerCamelCase__: int =save_path
lowerCamelCase__: Dict =self.vqgan.decode(self.latent)[0]
if show_intermediate:
print("Original Image")
show_pil(custom_to_pil(UpperCAmelCase_))
lowerCamelCase__: List[Any] =loop_post_process(UpperCAmelCase_)
for iter, transformed_img in enumerate(self._optimize_CLIP(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)):
if show_intermediate:
show_pil(UpperCAmelCase_)
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F"""iter_{iter:03d}.png"""))
if self.log:
wandb.log({"Image": wandb.Image(UpperCAmelCase_)})
if show_final:
show_pil(UpperCAmelCase_)
if save_final:
transformed_img.save(os.path.join(self.save_path , F"""iter_{iter:03d}_final.png"""))
| 59 |
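# The VQGAN-CLIP editor above optimizes a latent offset against a prompt loss,
# but the dataset-style placeholder identifiers obscure it. Below is a minimal,
# hedged sketch of that loss with readable names: weighted CLIP image-text
# similarity for positive/negative prompts, combined as -log(pos) + log(neg).
# The checkpoint name is the public CLIP base model, not taken from the sample.
import torch
from transformers import CLIPModel, CLIPProcessor

clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

def weighted_clip_similarity(prompts, image, weights):
    inputs = clip_processor(text=prompts, images=image, return_tensors="pt", padding=True)
    logits_per_image = clip(**inputs).logits_per_image  # shape (1, num_prompts)
    return (logits_per_image * weights).sum()

def clip_prompt_loss(image, pos_prompts, neg_prompts=None):
    pos = weighted_clip_similarity(pos_prompts["prompts"], image, 1 / pos_prompts["weights"])
    if neg_prompts:
        neg = weighted_clip_similarity(neg_prompts["prompts"], image, neg_prompts["weights"])
    else:
        neg = torch.tensor(1.0)
    return -torch.log(pos) + torch.log(neg)  # pull toward positives, push away from negatives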
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : List[str] = ['image_processor', 'tokenizer']
__SCREAMING_SNAKE_CASE : Any = 'LayoutLMv3ImageProcessor'
__SCREAMING_SNAKE_CASE : Any = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__(self , lowercase=None , lowercase=None , **lowercase ):
A_ : Dict = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , lowercase , )
A_ : List[str] = kwargs.pop("""feature_extractor""" )
A_ : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(lowercase , lowercase )
def __call__(self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = True , lowercase = False , lowercase = None , lowercase = None , lowercase = 0 , lowercase = None , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = True , lowercase = None , **lowercase , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"""You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.""" )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"""You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" )
# first, apply the image processor
A_ : Optional[int] = self.image_processor(images=lowercase , return_tensors=lowercase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(lowercase , lowercase ):
A_ : Union[str, Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
A_ : Dict = features["""words"""]
A_ : Optional[int] = self.tokenizer(
            text=text if text is not None else features["""words"""] ,
            text_pair=text_pair if text_pair is not None else None ,
            boxes=boxes if boxes is not None else features["""boxes"""] ,
            word_labels=lowercase ,
            add_special_tokens=lowercase ,
            padding=lowercase ,
            truncation=lowercase ,
            max_length=lowercase ,
            stride=lowercase ,
            pad_to_multiple_of=lowercase ,
            return_token_type_ids=lowercase ,
            return_attention_mask=lowercase ,
            return_overflowing_tokens=lowercase ,
            return_special_tokens_mask=lowercase ,
            return_offsets_mapping=lowercase ,
            return_length=lowercase ,
            verbose=lowercase ,
            return_tensors=lowercase ,
            **lowercase ,
        )
# add pixel values
A_ : List[Any] = features.pop("""pixel_values""" )
if return_overflowing_tokens is True:
A_ : List[str] = self.get_overflowing_images(lowercase , encoded_inputs["""overflow_to_sample_mapping"""] )
A_ : Optional[int] = images
return encoded_inputs
def _a (self , lowercase , lowercase ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
A_ : str = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(lowercase ) != len(lowercase ):
raise ValueError(
"""Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"""
F' {len(lowercase )} and {len(lowercase )}' )
return images_with_overflow
def _a (self , *lowercase , **lowercase ):
return self.tokenizer.batch_decode(*lowercase , **lowercase )
def _a (self , *lowercase , **lowercase ):
return self.tokenizer.decode(*lowercase , **lowercase )
@property
def _a (self ):
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def _a (self ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowercase , )
return self.image_processor_class
@property
def _a (self ):
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , lowercase , )
        return self.image_processor
| 667 | 0 |
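# A hedged usage sketch for the processor above. With the default apply_ocr=True
# the image processor runs Tesseract itself (pytesseract must be installed), so
# only an image is passed and the words/boxes come back in the encoding. The
# file name is illustrative.
from PIL import Image
from transformers import LayoutLMv3Processor

processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
image = Image.open("document.png").convert("RGB")  # hypothetical local scan
encoding = processor(image, return_tensors="pt")
print(sorted(encoding.keys()))  # attention_mask, bbox, input_ids, pixel_values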
from collections.abc import Iterable
from typing import Any
class __lowerCAmelCase :
def __init__(self , __magic_name__ = None ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = value
snake_case_ : Node | None = None # Added in order to delete a node easier
snake_case_ : Node | None = None
snake_case_ : Node | None = None
def __repr__(self ) -> str:
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({F'''{self.value}''': (self.left, self.right)} , indent=1 )
class __lowerCAmelCase :
def __init__(self , __magic_name__ = None ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Any = root
def __str__(self ) -> str:
'''simple docstring'''
return str(self.root )
def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> None:
'''simple docstring'''
if new_children is not None: # reset its kids
snake_case_ : List[Any] = node.parent
if node.parent is not None: # reset its parent
if self.is_right(__magic_name__ ): # If it is the right children
snake_case_ : List[Any] = new_children
else:
snake_case_ : List[Any] = new_children
else:
snake_case_ : int = new_children
def lowerCamelCase (self , __magic_name__ ) -> bool:
'''simple docstring'''
if node.parent and node.parent.right:
return node == node.parent.right
return False
def lowerCamelCase (self ) -> bool:
'''simple docstring'''
return self.root is None
def lowerCamelCase (self , __magic_name__ ) -> None:
'''simple docstring'''
snake_case_ : Any = Node(__magic_name__ ) # create a new Node
if self.empty(): # if Tree is empty
snake_case_ : int = new_node # set its root
else: # Tree is not empty
snake_case_ : Optional[int] = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
snake_case_ : str = new_node # We insert the new node in a leaf
break
else:
snake_case_ : int = parent_node.left
else:
if parent_node.right is None:
snake_case_ : Union[str, Any] = new_node
break
else:
snake_case_ : Optional[Any] = parent_node.right
snake_case_ : Optional[Any] = parent_node
def lowerCamelCase (self , *__magic_name__ ) -> None:
'''simple docstring'''
for value in values:
self.__insert(__magic_name__ )
def lowerCamelCase (self , __magic_name__ ) -> Node | None:
'''simple docstring'''
if self.empty():
            raise IndexError('''Warning: Tree is empty! Please insert values before searching.''' )
else:
snake_case_ : int = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
snake_case_ : Tuple = node.left if value < node.value else node.right
return node
def lowerCamelCase (self , __magic_name__ = None ) -> Node | None:
'''simple docstring'''
if node is None:
if self.root is None:
return None
snake_case_ : Optional[int] = self.root
if not self.empty():
while node.right is not None:
snake_case_ : Dict = node.right
return node
def lowerCamelCase (self , __magic_name__ = None ) -> Node | None:
'''simple docstring'''
if node is None:
snake_case_ : Dict = self.root
if self.root is None:
return None
if not self.empty():
snake_case_ : Union[str, Any] = self.root
while node.left is not None:
snake_case_ : Tuple = node.left
return node
def lowerCamelCase (self , __magic_name__ ) -> None:
'''simple docstring'''
snake_case_ : int = self.search(__magic_name__ ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(__magic_name__ , __magic_name__ )
elif node.left is None: # Has only right children
self.__reassign_nodes(__magic_name__ , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(__magic_name__ , node.left )
else:
snake_case_ : Optional[Any] = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
snake_case_ : List[str] = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def lowerCamelCase (self , __magic_name__ ) -> Iterable:
'''simple docstring'''
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def lowerCamelCase (self , __magic_name__=None ) -> Any:
'''simple docstring'''
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> None:
'''simple docstring'''
if node:
self.inorder(__magic_name__ , node.left )
arr.append(node.value )
self.inorder(__magic_name__ , node.right )
def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> int:
'''simple docstring'''
snake_case_ : list[int] = []
self.inorder(__magic_name__ , __magic_name__ ) # append all values to list using inorder traversal
return arr[k - 1]
def lowerCamelCase_ ( _UpperCamelCase ) -> list[Node]:
"""simple docstring"""
snake_case_ : str = []
if curr_node is not None:
snake_case_ : Tuple = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def lowerCamelCase_ ( ) -> None:
"""simple docstring"""
snake_case_ : Optional[int] = (8, 3, 6, 1, 10, 14, 13, 4, 7)
snake_case_ : Optional[Any] = BinarySearchTree()
for i in testlist:
t.insert(_UpperCamelCase )
# Prints all the elements of the list in order traversal
print(_UpperCamelCase )
if t.search(6 ) is not None:
print('''The value 6 exists''' )
else:
print('''The value 6 doesn\'t exist''' )
if t.search(-1 ) is not None:
print('''The value -1 exists''' )
else:
print('''The value -1 doesn\'t exist''' )
if not t.empty():
print('''Max Value: ''' , t.get_max().value ) # type: ignore
print('''Min Value: ''' , t.get_min().value ) # type: ignore
for i in testlist:
t.remove(_UpperCamelCase )
print(_UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 60 |
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
lowerCamelCase :Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(__UpperCAmelCase )
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , **lowercase ):
super().__init__(**lowercase )
requires_backends(self , """vision""" )
requires_backends(self , """torch""" )
if self.framework != "pt":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
self.check_model_type(lowercase )
def _a (self , **lowercase ):
A_ : str = {}
A_ : Dict = {}
A_ : str = {}
# preprocess args
if "points_per_batch" in kwargs:
A_ : Dict = kwargs["""points_per_batch"""]
if "points_per_crop" in kwargs:
A_ : int = kwargs["""points_per_crop"""]
if "crops_n_layers" in kwargs:
A_ : str = kwargs["""crops_n_layers"""]
if "crop_overlap_ratio" in kwargs:
A_ : int = kwargs["""crop_overlap_ratio"""]
if "crop_n_points_downscale_factor" in kwargs:
A_ : Tuple = kwargs["""crop_n_points_downscale_factor"""]
# postprocess args
if "pred_iou_thresh" in kwargs:
A_ : Any = kwargs["""pred_iou_thresh"""]
if "stability_score_offset" in kwargs:
A_ : Optional[int] = kwargs["""stability_score_offset"""]
if "mask_threshold" in kwargs:
A_ : Union[str, Any] = kwargs["""mask_threshold"""]
if "stability_score_thresh" in kwargs:
A_ : List[str] = kwargs["""stability_score_thresh"""]
if "crops_nms_thresh" in kwargs:
A_ : Union[str, Any] = kwargs["""crops_nms_thresh"""]
if "output_rle_mask" in kwargs:
A_ : List[Any] = kwargs["""output_rle_mask"""]
if "output_bboxes_mask" in kwargs:
A_ : Union[str, Any] = kwargs["""output_bboxes_mask"""]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__(self , lowercase , *lowercase , lowercase=None , lowercase=None , **lowercase ):
return super().__call__(lowercase , *lowercase , num_workers=lowercase , batch_size=lowercase , **lowercase )
def _a (self , lowercase , lowercase=64 , lowercase = 0 , lowercase = 512 / 1500 , lowercase = 32 , lowercase = 1 , ):
A_ : Tuple = load_image(lowercase )
A_ : int = self.image_processor.size["""longest_edge"""]
A_, A_, A_, A_ : str = self.image_processor.generate_crop_boxes(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
A_ : Dict = self.image_processor(images=lowercase , return_tensors="""pt""" )
with self.device_placement():
if self.framework == "pt":
A_ : Optional[Any] = self.get_inference_context()
with inference_context():
A_ : str = self._ensure_tensor_on_device(lowercase , device=self.device )
A_ : Tuple = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
A_ : Tuple = image_embeddings
A_ : Dict = grid_points.shape[1]
A_ : Optional[Any] = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"""Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
"""To return all points at once, set points_per_batch to None""" )
for i in range(0 , lowercase , lowercase ):
A_ : Tuple = grid_points[:, i : i + points_per_batch, :, :]
A_ : List[Any] = input_labels[:, i : i + points_per_batch]
A_ : Optional[Any] = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def _a (self , lowercase , lowercase=0.88 , lowercase=0.95 , lowercase=0 , lowercase=1 , ):
A_ : Any = model_inputs.pop("""input_boxes""" )
A_ : str = model_inputs.pop("""is_last""" )
A_ : int = model_inputs.pop("""original_sizes""" ).tolist()
A_ : int = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
A_ : List[str] = self.model(**lowercase )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
A_ : Optional[int] = model_outputs["""pred_masks"""]
A_ : Tuple = self.image_processor.post_process_masks(
lowercase , lowercase , lowercase , lowercase , binarize=lowercase )
A_ : Union[str, Any] = model_outputs["""iou_scores"""]
A_, A_, A_ : Tuple = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , lowercase , lowercase , lowercase , lowercase , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def _a (self , lowercase , lowercase=False , lowercase=False , lowercase=0.7 , ):
A_ : Tuple = []
A_ : Optional[Any] = []
A_ : str = []
for model_output in model_outputs:
all_scores.append(model_output.pop("""iou_scores""" ) )
all_masks.extend(model_output.pop("""masks""" ) )
all_boxes.append(model_output.pop("""boxes""" ) )
A_ : Any = torch.cat(lowercase )
A_ : List[Any] = torch.cat(lowercase )
A_, A_, A_, A_ : Optional[int] = self.image_processor.post_process_for_mask_generation(
lowercase , lowercase , lowercase , lowercase )
A_ : int = defaultdict(lowercase )
for output in model_outputs:
for k, v in output.items():
extra[k].append(lowercase )
A_ : Optional[int] = {}
if output_rle_mask:
A_ : List[str] = rle_mask
if output_bboxes_mask:
A_ : Optional[int] = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra} | 667 | 0 |
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def _A ( lowerCAmelCase_ : Any ):
"""simple docstring"""
lowerCAmelCase__ = VideoMAEConfig()
set_architecture_configs(lowerCAmelCase_ , lowerCAmelCase_ )
if "finetuned" not in model_name:
lowerCAmelCase__ = False
if "finetuned" in model_name:
lowerCAmelCase__ = "huggingface/label-files"
if "kinetics" in model_name:
lowerCAmelCase__ = 400
lowerCAmelCase__ = "kinetics400-id2label.json"
elif "ssv2" in model_name:
lowerCAmelCase__ = 174
lowerCAmelCase__ = "something-something-v2-id2label.json"
else:
raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned." )
lowerCAmelCase__ = json.load(open(hf_hub_download(lowerCAmelCase_ , lowerCAmelCase_ , repo_type="dataset" ) , "r" ) )
lowerCAmelCase__ = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()}
lowerCAmelCase__ = idalabel
lowerCAmelCase__ = {v: k for k, v in idalabel.items()}
return config
def _A ( lowerCAmelCase_ : str , lowerCAmelCase_ : int ):
"""simple docstring"""
if "small" in model_name:
lowerCAmelCase__ = 384
lowerCAmelCase__ = 1536
lowerCAmelCase__ = 12
lowerCAmelCase__ = 16
lowerCAmelCase__ = 12
lowerCAmelCase__ = 3
lowerCAmelCase__ = 192
lowerCAmelCase__ = 768
elif "large" in model_name:
lowerCAmelCase__ = 1024
lowerCAmelCase__ = 4096
lowerCAmelCase__ = 24
lowerCAmelCase__ = 16
lowerCAmelCase__ = 12
lowerCAmelCase__ = 8
lowerCAmelCase__ = 512
lowerCAmelCase__ = 2048
elif "huge" in model_name:
lowerCAmelCase__ = 1280
lowerCAmelCase__ = 5120
lowerCAmelCase__ = 32
lowerCAmelCase__ = 16
lowerCAmelCase__ = 12
lowerCAmelCase__ = 8
lowerCAmelCase__ = 640
lowerCAmelCase__ = 2560
elif "base" not in model_name:
raise ValueError("Model name should include either \"small\", \"base\", \"large\", or \"huge\"" )
def _A ( lowerCAmelCase_ : Union[str, Any] ):
"""simple docstring"""
if "encoder." in name:
lowerCAmelCase__ = name.replace("encoder." , "" )
if "cls_token" in name:
lowerCAmelCase__ = name.replace("cls_token" , "videomae.embeddings.cls_token" )
if "decoder_pos_embed" in name:
lowerCAmelCase__ = name.replace("decoder_pos_embed" , "decoder.decoder_pos_embed" )
if "pos_embed" in name and "decoder" not in name:
lowerCAmelCase__ = name.replace("pos_embed" , "videomae.embeddings.position_embeddings" )
if "patch_embed.proj" in name:
lowerCAmelCase__ = name.replace("patch_embed.proj" , "videomae.embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
lowerCAmelCase__ = name.replace("patch_embed.norm" , "videomae.embeddings.norm" )
if "decoder.blocks" in name:
lowerCAmelCase__ = name.replace("decoder.blocks" , "decoder.decoder_layers" )
if "blocks" in name:
lowerCAmelCase__ = name.replace("blocks" , "videomae.encoder.layer" )
if "attn.proj" in name:
lowerCAmelCase__ = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name and "bias" not in name:
lowerCAmelCase__ = name.replace("attn" , "attention.self" )
if "attn" in name:
lowerCAmelCase__ = name.replace("attn" , "attention.attention" )
if "norm1" in name:
lowerCAmelCase__ = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
lowerCAmelCase__ = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
lowerCAmelCase__ = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
lowerCAmelCase__ = name.replace("mlp.fc2" , "output.dense" )
if "decoder_embed" in name:
lowerCAmelCase__ = name.replace("decoder_embed" , "decoder.decoder_embed" )
if "decoder_norm" in name:
lowerCAmelCase__ = name.replace("decoder_norm" , "decoder.decoder_norm" )
if "decoder_pred" in name:
lowerCAmelCase__ = name.replace("decoder_pred" , "decoder.decoder_pred" )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
lowerCAmelCase__ = name.replace("norm.weight" , "videomae.layernorm.weight" )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
lowerCAmelCase__ = name.replace("norm.bias" , "videomae.layernorm.bias" )
if "head" in name and "decoder" not in name:
lowerCAmelCase__ = name.replace("head" , "classifier" )
return name
def _A ( lowerCAmelCase_ : Dict , lowerCAmelCase_ : Dict ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
lowerCAmelCase__ = orig_state_dict.pop(lowerCAmelCase_ )
if key.startswith("encoder." ):
lowerCAmelCase__ = key.replace("encoder." , "" )
if "qkv" in key:
lowerCAmelCase__ = key.split("." )
if key.startswith("decoder.blocks" ):
lowerCAmelCase__ = config.decoder_hidden_size
lowerCAmelCase__ = int(key_split[2] )
lowerCAmelCase__ = "decoder.decoder_layers."
if "weight" in key:
lowerCAmelCase__ = val[:dim, :]
lowerCAmelCase__ = val[dim : dim * 2, :]
lowerCAmelCase__ = val[-dim:, :]
else:
lowerCAmelCase__ = config.hidden_size
lowerCAmelCase__ = int(key_split[1] )
lowerCAmelCase__ = "videomae.encoder.layer."
if "weight" in key:
lowerCAmelCase__ = val[:dim, :]
lowerCAmelCase__ = val[dim : dim * 2, :]
lowerCAmelCase__ = val[-dim:, :]
else:
lowerCAmelCase__ = val
return orig_state_dict
def _A ( ):
"""simple docstring"""
lowerCAmelCase__ = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
lowerCAmelCase__ = np.load(lowerCAmelCase_ )
return list(lowerCAmelCase_ )
def _A ( lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str ):
"""simple docstring"""
lowerCAmelCase__ = get_videomae_config(lowerCAmelCase_ )
if "finetuned" in model_name:
lowerCAmelCase__ = VideoMAEForVideoClassification(lowerCAmelCase_ )
else:
lowerCAmelCase__ = VideoMAEForPreTraining(lowerCAmelCase_ )
# download original checkpoint, hosted on Google Drive
lowerCAmelCase__ = "pytorch_model.bin"
gdown.cached_download(lowerCAmelCase_ , lowerCAmelCase_ , quiet=lowerCAmelCase_ )
lowerCAmelCase__ = torch.load(lowerCAmelCase_ , map_location="cpu" )
if "model" in files:
lowerCAmelCase__ = files["model"]
else:
lowerCAmelCase__ = files["module"]
lowerCAmelCase__ = convert_state_dict(lowerCAmelCase_ , lowerCAmelCase_ )
model.load_state_dict(lowerCAmelCase_ )
model.eval()
# verify model on basic input
lowerCAmelCase__ = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
lowerCAmelCase__ = prepare_video()
lowerCAmelCase__ = image_processor(lowerCAmelCase_ , return_tensors="pt" )
if "finetuned" not in model_name:
lowerCAmelCase__ = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" )
lowerCAmelCase__ = torch.load(lowerCAmelCase_ )
lowerCAmelCase__ = model(**lowerCAmelCase_ )
lowerCAmelCase__ = outputs.logits
lowerCAmelCase__ = [
"videomae-small-finetuned-kinetics",
"videomae-small-finetuned-ssv2",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"videomae-base-short",
"videomae-base-short-finetuned-kinetics",
"videomae-base",
"videomae-base-finetuned-kinetics",
"videomae-large",
"videomae-large-finetuned-kinetics",
"videomae-huge-finetuned-kinetics",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"videomae-base-short-ssv2",
"videomae-base-short-finetuned-ssv2",
"videomae-base-ssv2",
"videomae-base-finetuned-ssv2",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
lowerCAmelCase__ = torch.Size([1, 400] )
lowerCAmelCase__ = torch.tensor([-0.9291, -0.4061, -0.9307] )
elif model_name == "videomae-small-finetuned-ssv2":
lowerCAmelCase__ = torch.Size([1, 174] )
lowerCAmelCase__ = torch.tensor([0.2671, -0.4689, -0.8235] )
elif model_name == "videomae-base":
lowerCAmelCase__ = torch.Size([1, 1408, 1536] )
lowerCAmelCase__ = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] )
elif model_name == "videomae-base-short":
lowerCAmelCase__ = torch.Size([1, 1408, 1536] )
lowerCAmelCase__ = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] )
# we verified the loss both for normalized and unnormalized targets for this one
lowerCAmelCase__ = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] )
elif model_name == "videomae-large":
lowerCAmelCase__ = torch.Size([1, 1408, 1536] )
lowerCAmelCase__ = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] )
elif model_name == "videomae-large-finetuned-kinetics":
lowerCAmelCase__ = torch.Size([1, 400] )
lowerCAmelCase__ = torch.tensor([0.0771, 0.0011, -0.3625] )
elif model_name == "videomae-huge-finetuned-kinetics":
lowerCAmelCase__ = torch.Size([1, 400] )
lowerCAmelCase__ = torch.tensor([0.2433, 0.1632, -0.4894] )
elif model_name == "videomae-base-short-finetuned-kinetics":
lowerCAmelCase__ = torch.Size([1, 400] )
lowerCAmelCase__ = torch.tensor([0.6588, 0.0990, -0.2493] )
elif model_name == "videomae-base-finetuned-kinetics":
lowerCAmelCase__ = torch.Size([1, 400] )
lowerCAmelCase__ = torch.tensor([0.3669, -0.0688, -0.2421] )
elif model_name == "videomae-base-short-ssv2":
lowerCAmelCase__ = torch.Size([1, 1408, 1536] )
lowerCAmelCase__ = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
lowerCAmelCase__ = torch.Size([1, 174] )
lowerCAmelCase__ = torch.tensor([-0.0537, -0.1539, -0.3266] )
elif model_name == "videomae-base-ssv2":
lowerCAmelCase__ = torch.Size([1, 1408, 1536] )
lowerCAmelCase__ = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] )
elif model_name == "videomae-base-finetuned-ssv2":
lowerCAmelCase__ = torch.Size([1, 174] )
lowerCAmelCase__ = torch.tensor([0.1961, -0.8337, -0.6389] )
else:
raise ValueError(F'Model name not supported. Should be one of {model_names}' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , lowerCAmelCase_ , atol=1E-4 )
else:
print("Logits:" , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , lowerCAmelCase_ , atol=1E-4 )
print("Logits ok!" )
# verify loss, if applicable
if model_name == "videomae-base-short":
lowerCAmelCase__ = outputs.loss
assert torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-4 )
print("Loss ok!" )
if pytorch_dump_folder_path is not None:
print(F'Saving model and image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowerCAmelCase_ )
model.save_pretrained(lowerCAmelCase_ )
if push_to_hub:
print("Pushing to the hub..." )
model.push_to_hub(lowerCAmelCase_ , organization="nielsr" )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
type=str,
help=(
'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'
' download link.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='/Users/nielsrogge/Documents/VideoMAE/Test',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.')
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
UpperCamelCase = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 61 |
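# Once converted, a checkpoint loads like any Hub model. The MCG-NJU repo below
# is where converted VideoMAE weights were published (an assumption, not stated
# in the script); random uint8 frames stand in for a real 16-frame clip.
import numpy as np
import torch
from transformers import VideoMAEForVideoClassification, VideoMAEImageProcessor

processor = VideoMAEImageProcessor.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics")
model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics")

video = list(np.random.randint(0, 256, (16, 360, 640, 3), dtype=np.uint8))
inputs = processor(video, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # (1, 400) for Kinetics-400
print(model.config.id2label[int(logits.argmax(-1))])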
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Union[str, Any] = int(np.ceil((x_end - xa) / step_size ) )
A_ : int = np.zeros((n + 1,) )
A_ : List[str] = ya
A_ : Any = xa
for k in range(lowerCamelCase__ ):
A_ : List[Any] = y[k] + step_size * ode_func(lowerCamelCase__ , y[k] )
A_ : Optional[int] = y[k] + (
(step_size / 2) * (ode_func(lowerCamelCase__ , y[k] ) + ode_func(x + step_size , lowerCamelCase__ ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 667 | 0 |
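# The function above is Heun's method (forward-Euler predictor followed by a
# trapezoidal corrector) hidden behind placeholder names; this is the same
# update rewritten readably, as a sketch only.
import numpy as np

def heun(ode_func, x0, y0, x_end, h):
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros(n + 1)
    y[0], x = y0, x0
    for k in range(n):
        y_pred = y[k] + h * ode_func(x, y[k])  # forward-Euler predictor
        y[k + 1] = y[k] + (h / 2) * (ode_func(x, y[k]) + ode_func(x + h, y_pred))  # corrector
        x += h
    return y

print(heun(lambda x, y: y, 0.0, 1.0, 1.0, 0.25)[-1])  # approximates e ~ 2.718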
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = AlbertConfig.from_json_file(lowercase )
print(F'''Building PyTorch model from configuration: {config}''' )
SCREAMING_SNAKE_CASE : Optional[int] = AlbertForPreTraining(lowercase )
# Load weights from tf checkpoint
load_tf_weights_in_albert(lowercase , lowercase , lowercase )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , lowercase )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--albert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained ALBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
snake_case = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 62 |
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase :Union[str, Any] = logging.get_logger(__name__)
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[int] = MobileNetVaConfig(layer_norm_eps=0.001 )
if "_quant" in model_name:
raise ValueError("""Quantized models are not supported.""" )
A_ : Any = re.match(r"""^mobilenet_v1_([^_]*)_([^_]*)$""" , lowerCamelCase__ )
if matches:
A_ : Optional[Any] = float(matches[1] )
A_ : Union[str, Any] = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
A_ : Optional[Any] = 10_01
A_ : Union[str, Any] = """imagenet-1k-id2label.json"""
A_ : List[str] = """huggingface/label-files"""
A_ : str = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="""dataset""" ) , """r""" ) )
A_ : Optional[int] = {int(lowerCamelCase__ ) + 1: v for k, v in idalabel.items()}
A_ : int = """background"""
A_ : List[str] = idalabel
A_ : List[str] = {v: k for k, v in idalabel.items()}
return config
def a ( ):
'''simple docstring'''
A_ : List[str] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A_ : Optional[int] = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
return im
@torch.no_grad()
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ):
'''simple docstring'''
A_ : Optional[Any] = get_mobilenet_va_config(lowerCamelCase__ )
# Load 🤗 model
A_ : Dict = MobileNetVaForImageClassification(lowerCamelCase__ ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
A_ : Any = MobileNetVaImageProcessor(
crop_size={"""width""": config.image_size, """height""": config.image_size} , size={"""shortest_edge""": config.image_size + 32} , )
A_ : int = image_processor(images=prepare_img() , return_tensors="""pt""" )
A_ : List[str] = model(**lowerCamelCase__ )
A_ : Any = outputs.logits
assert logits.shape == (1, 10_01)
if model_name == "mobilenet_v1_1.0_224":
A_ : str = torch.tensor([-4.1_739, -1.1_233, 3.1_205] )
elif model_name == "mobilenet_v1_0.75_192":
A_ : int = torch.tensor([-3.9_440, -2.3_141, -0.3_333] )
else:
A_ : Any = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 )
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCamelCase__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
print("""Pushing to the hub...""" )
A_ : Union[str, Any] = """google/""" + model_name
image_processor.push_to_hub(lowerCamelCase__ )
model.push_to_hub(lowerCamelCase__ )
if __name__ == "__main__":
lowerCamelCase :Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''mobilenet_v1_1.0_224''',
type=str,
help='''Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.''',
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCamelCase :str = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 667 | 0 |
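# Once converted (or using Google's ready-made weights on the Hub), inference is
# the standard image-classification flow; the checkpoint name matches the
# script's default --model_name. A sketch, using the COCO cats image as input.
import requests
import torch
from PIL import Image
from transformers import MobileNetV1ForImageClassification, MobileNetV1ImageProcessor

processor = MobileNetV1ImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # (1, 1001): index 0 is the extra "background" class
print(model.config.id2label[int(logits.argmax(-1))])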
class Graph :
    """simple docstring"""
    def __init__( self ) -> None:
        self.vertex = {}  # adjacency map: vertex -> list of neighbouring vertices
    def print_graph( self ) -> None:
        print(self.vertex )
        for i in self.vertex:
            print(i , """ -> """ , """ -> """.join([str(j ) for j in self.vertex[i]] ) )
    def add_edge( self , from_vertex : int , to_vertex : int ) -> None:
        # check if the source vertex is already present
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex )
        else:
            # else make a new adjacency list for it
            self.vertex[from_vertex] = [to_vertex]
    def dfs( self ) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex )
        # call the recursive helper function for each unvisited vertex
        for i in range(len(self.vertex ) ):
            if not visited[i]:
                self.dfs_recursive(i , visited )
    def dfs_recursive( self , start_vertex : int , visited : list ) -> None:
        # mark the start vertex as visited
        visited[start_vertex] = True
        print(start_vertex , end=""" """ )
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i , visited )
if __name__ == "__main__":
g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("DFS:")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 63 |
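# The recursive DFS above recurses once per vertex; an equivalent iterative
# version with an explicit stack sidesteps Python's recursion limit on deep
# graphs. A standalone sketch, not part of the sample.
def dfs_iterative(graph, start):
    visited, order, stack = set(), [], [start]
    while stack:
        vertex = stack.pop()
        if vertex in visited:
            continue
        visited.add(vertex)
        order.append(vertex)
        stack.extend(reversed(graph.get(vertex, [])))  # reverse so pops follow insertion order
    return order

print(dfs_iterative({0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}, 0))  # [0, 1, 2, 3]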
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
lowerCamelCase :List[str] = logging.get_logger(__name__)
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Dict = 'AutoTokenizer'
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['tokenizer']
__SCREAMING_SNAKE_CASE : Tuple = {
'semantic_prompt': 1,
'coarse_prompt': 2,
'fine_prompt': 2,
}
def __init__(self , lowercase , lowercase=None ):
super().__init__(lowercase )
A_ : Any = speaker_embeddings
@classmethod
def _a (cls , lowercase , lowercase="speaker_embeddings_path.json" , **lowercase ):
if speaker_embeddings_dict_path is not None:
A_ : Any = get_file_from_repo(
                lowercase ,
                lowercase ,
                subfolder=kwargs.pop("""subfolder""" , lowercase ) ,
                cache_dir=kwargs.pop("""cache_dir""" , lowercase ) ,
                force_download=kwargs.pop("""force_download""" , lowercase ) ,
                proxies=kwargs.pop("""proxies""" , lowercase ) ,
                resume_download=kwargs.pop("""resume_download""" , lowercase ) ,
                local_files_only=kwargs.pop("""local_files_only""" , lowercase ) ,
                use_auth_token=kwargs.pop("""use_auth_token""" , lowercase ) ,
                revision=kwargs.pop("""revision""" , lowercase ) ,
            )
if speaker_embeddings_path is None:
logger.warning(
                    F'`{os.path.join(lowercase , lowercase )}` does not exist, no preloaded speaker embeddings will be used - Make sure to provide a correct path to the JSON dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' )
A_ : str = None
else:
with open(lowercase ) as speaker_embeddings_json:
A_ : List[str] = json.load(lowercase )
else:
A_ : str = None
A_ : int = AutoTokenizer.from_pretrained(lowercase , **lowercase )
return cls(tokenizer=lowercase , speaker_embeddings=lowercase )
def _a (self , lowercase , lowercase="speaker_embeddings_path.json" , lowercase="speaker_embeddings" , lowercase = False , **lowercase , ):
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(lowercase , lowercase , """v2""" ) , exist_ok=lowercase )
A_ : Optional[int] = {}
A_ : Tuple = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
A_ : Union[str, Any] = self._load_voice_preset(lowercase )
A_ : Tuple = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["""repo_or_path"""] , lowercase , F'{prompt_key}_{key}' ) , voice_preset[key] , allow_pickle=lowercase , )
A_ : List[str] = os.path.join(lowercase , F'{prompt_key}_{key}.npy' )
A_ : str = tmp_dict
with open(os.path.join(lowercase , lowercase ) , """w""" ) as fp:
json.dump(lowercase , lowercase )
super().save_pretrained(lowercase , lowercase , **lowercase )
def _a (self , lowercase = None , **lowercase ):
A_ : List[Any] = self.speaker_embeddings[voice_preset]
A_ : Optional[Any] = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' )
A_ : int = get_file_from_repo(
self.speaker_embeddings.get("""repo_or_path""" , """/""" ) , voice_preset_paths[key] , subfolder=kwargs.pop("""subfolder""" , lowercase ) , cache_dir=kwargs.pop("""cache_dir""" , lowercase ) , force_download=kwargs.pop("""force_download""" , lowercase ) , proxies=kwargs.pop("""proxies""" , lowercase ) , resume_download=kwargs.pop("""resume_download""" , lowercase ) , local_files_only=kwargs.pop("""local_files_only""" , lowercase ) , use_auth_token=kwargs.pop("""use_auth_token""" , lowercase ) , revision=kwargs.pop("""revision""" , lowercase ) , )
if path is None:
raise ValueError(
                    F'`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exist, no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset} embeddings.' )
A_ : Tuple = np.load(lowercase )
return voice_preset_dict
def _a (self , lowercase = None ):
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F'Voice preset unrecognized, missing {key} as a key.' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
def __call__(self , lowercase=None , lowercase=None , lowercase="pt" , lowercase=256 , lowercase=False , lowercase=True , lowercase=False , **lowercase , ):
if voice_preset is not None and not isinstance(lowercase , lowercase ):
if (
isinstance(lowercase , lowercase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
A_ : Optional[int] = self._load_voice_preset(lowercase )
else:
if isinstance(lowercase , lowercase ) and not voice_preset.endswith(""".npz""" ):
A_ : Optional[int] = voice_preset + """.npz"""
A_ : Any = np.load(lowercase )
if voice_preset is not None:
self._validate_voice_preset_dict(lowercase , **lowercase )
A_ : Optional[int] = BatchFeature(data=lowercase , tensor_type=lowercase )
A_ : Any = self.tokenizer(
lowercase , return_tensors=lowercase , padding="""max_length""" , max_length=lowercase , return_attention_mask=lowercase , return_token_type_ids=lowercase , add_special_tokens=lowercase , **lowercase , )
if voice_preset is not None:
A_ : Union[str, Any] = voice_preset
        return encoded_text
| 667 | 0 |
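# A hedged usage sketch for the Bark processor above: "suno/bark" ships both the
# tokenizer and the speaker_embeddings_path.json this class reads, and a voice
# preset name selects one entry from it.
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("suno/bark")
inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
print(sorted(inputs.keys()))  # typically attention_mask, history_prompt, input_ids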
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode :
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None
def A__ ( snake_case_ : TreeNode | None ):
    # Validation
    def is_valid_tree(node : TreeNode | None ) -> bool:
        if node is None:
            return True
        if not isinstance(node , TreeNode ):
            return False
        try:
            float(node.data )
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left ) and is_valid_tree(node.right )
    if not is_valid_tree(snake_case_ ):
        raise ValueError(
            '''Each node should be of type TreeNode and its data should be a float.''' )
    def is_binary_search_tree_recursive_check(
        node : TreeNode | None , left_bound : float , right_bound : float ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left , left_bound , node.data )
            and is_binary_search_tree_recursive_check(
                node.right , node.data , right_bound )
        )
    return is_binary_search_tree_recursive_check(snake_case_ , -float('''inf''' ) , float('''inf''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 64 |
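# Quick check of the validator above (the function kept its placeholder name,
# A__, in this dump): the second tree hides the violation deep in the left
# subtree, which a naive parent-child comparison would miss but the propagated
# (left_bound, right_bound) check catches.
valid = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
invalid = TreeNode(2.0, TreeNode(1.0, right=TreeNode(9.0)), TreeNode(3.0))
print(A__(valid))    # True
print(A__(invalid))  # False: 9.0 escapes the (1.0, 2.0) window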
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : Union[str, Any] = tempfile.mkdtemp()
A_ : List[Any] = BlipImageProcessor()
A_ : Optional[int] = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
A_ : Any = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
A_ : Dict = InstructBlipProcessor(lowercase , lowercase , lowercase )
processor.save_pretrained(self.tmpdirname )
def _a (self , **lowercase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).tokenizer
def _a (self , **lowercase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).image_processor
def _a (self , **lowercase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).qformer_tokenizer
def _a (self ):
shutil.rmtree(self.tmpdirname )
def _a (self ):
A_ : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
A_ : Optional[Any] = [Image.fromarray(np.moveaxis(lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _a (self ):
A_ : str = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
A_ : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A_ : Optional[Any] = self.get_image_processor(do_normalize=lowercase , padding_value=1.0 )
A_ : str = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase )
self.assertIsInstance(processor.qformer_tokenizer , lowercase )
def _a (self ):
A_ : Any = self.get_image_processor()
A_ : Union[str, Any] = self.get_tokenizer()
A_ : List[str] = self.get_qformer_tokenizer()
A_ : int = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : List[Any] = self.prepare_image_inputs()
A_ : Union[str, Any] = image_processor(lowercase , return_tensors="""np""" )
A_ : Dict = processor(images=lowercase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a (self ):
A_ : List[Any] = self.get_image_processor()
A_ : Optional[Any] = self.get_tokenizer()
A_ : Any = self.get_qformer_tokenizer()
A_ : List[str] = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : str = """lower newer"""
A_ : List[Any] = processor(text=lowercase )
A_ : Optional[int] = tokenizer(lowercase , return_token_type_ids=lowercase )
A_ : List[Any] = qformer_tokenizer(lowercase , return_token_type_ids=lowercase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] )
def _a (self ):
A_ : int = self.get_image_processor()
A_ : Union[str, Any] = self.get_tokenizer()
A_ : Union[str, Any] = self.get_qformer_tokenizer()
A_ : Any = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : Optional[int] = """lower newer"""
A_ : Optional[int] = self.prepare_image_inputs()
A_ : Tuple = processor(text=lowercase , images=lowercase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
# test if it raises when no input is passed
with pytest.raises(lowercase ):
processor()
def _a (self ):
A_ : Dict = self.get_image_processor()
A_ : str = self.get_tokenizer()
A_ : Optional[int] = self.get_qformer_tokenizer()
A_ : int = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ : Optional[int] = processor.batch_decode(lowercase )
A_ : Dict = tokenizer.batch_decode(lowercase )
self.assertListEqual(lowercase , lowercase )
def _a (self ):
A_ : Any = self.get_image_processor()
A_ : Dict = self.get_tokenizer()
A_ : Union[str, Any] = self.get_qformer_tokenizer()
A_ : Optional[int] = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : List[Any] = """lower newer"""
A_ : Optional[Any] = self.prepare_image_inputs()
A_ : Any = processor(text=lowercase , images=lowercase )
self.assertListEqual(
            list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
| 667 | 0 |
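# Outside the tests, the processor is used the same way with a real checkpoint;
# the checkpoint name below is illustrative of the published InstructBLIP models.
from PIL import Image
from transformers import InstructBlipProcessor

processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
image = Image.new("RGB", (224, 224))  # stand-in for a real photo
inputs = processor(images=image, text="What is shown in the picture?", return_tensors="pt")
print(sorted(inputs.keys()))
# ['attention_mask', 'input_ids', 'pixel_values', 'qformer_attention_mask', 'qformer_input_ids']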
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'caidas/swin2sr-classicalsr-x2-64': (
'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'
),
}
class __lowercase ( __lowerCamelCase ):
snake_case_ = """swin2sr"""
snake_case_ = {
"""hidden_size""": """embed_dim""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : List[str] ,A : Tuple=64 ,A : Tuple=1 ,A : List[Any]=3 ,A : Dict=180 ,A : int=[6, 6, 6, 6, 6, 6] ,A : Optional[Any]=[6, 6, 6, 6, 6, 6] ,A : Any=8 ,A : Optional[int]=2.0 ,A : Tuple=True ,A : int=0.0 ,A : Dict=0.0 ,A : int=0.1 ,A : List[Any]="gelu" ,A : Optional[Any]=False ,A : Union[str, Any]=0.0_2 ,A : str=1e-5 ,A : Optional[int]=2 ,A : int=1.0 ,A : Any="1conv" ,A : Tuple="pixelshuffle" ,**A : Optional[int] ,):
'''simple docstring'''
super().__init__(**A )
UpperCAmelCase__ : List[Any] = image_size
UpperCAmelCase__ : List[str] = patch_size
UpperCAmelCase__ : Dict = num_channels
UpperCAmelCase__ : str = embed_dim
UpperCAmelCase__ : List[Any] = depths
UpperCAmelCase__ : Union[str, Any] = len(A )
UpperCAmelCase__ : str = num_heads
UpperCAmelCase__ : int = window_size
UpperCAmelCase__ : int = mlp_ratio
UpperCAmelCase__ : Any = qkv_bias
UpperCAmelCase__ : Optional[int] = hidden_dropout_prob
UpperCAmelCase__ : List[Any] = attention_probs_dropout_prob
UpperCAmelCase__ : Optional[Any] = drop_path_rate
UpperCAmelCase__ : Optional[int] = hidden_act
UpperCAmelCase__ : List[Any] = use_absolute_embeddings
UpperCAmelCase__ : Union[str, Any] = layer_norm_eps
UpperCAmelCase__ : List[str] = initializer_range
UpperCAmelCase__ : Dict = upscale
UpperCAmelCase__ : str = img_range
UpperCAmelCase__ : int = resi_connection
UpperCAmelCase__ : str = upsampler
| 65 |
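# Instantiating a model from the config above; with the defaults this is the
# classical-SR x2 setup (upscale=2, embed_dim=180, six stages of depth 6).
# A sketch only - random weights, no pretrained checkpoint loaded.
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution

config = Swin2SRConfig()
model = Swin2SRForImageSuperResolution(config)
print(round(sum(p.numel() for p in model.parameters()) / 1e6, 1), "M parameters")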
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase :Optional[Any] = logging.get_logger(__name__)
lowerCamelCase :Tuple = {
'''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : List[Any] = 'mgp-str'
def __init__(self , lowercase=[32, 128] , lowercase=4 , lowercase=3 , lowercase=27 , lowercase=38 , lowercase=50257 , lowercase=30522 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=4.0 , lowercase=True , lowercase=False , lowercase=1E-5 , lowercase=0.0 , lowercase=0.0 , lowercase=0.0 , lowercase=False , lowercase=0.02 , **lowercase , ):
super().__init__(**lowercase )
A_ : int = image_size
A_ : List[str] = patch_size
A_ : Tuple = num_channels
A_ : List[str] = max_token_length
A_ : int = num_character_labels
A_ : str = num_bpe_labels
A_ : Tuple = num_wordpiece_labels
A_ : Optional[int] = hidden_size
A_ : List[Any] = num_hidden_layers
A_ : int = num_attention_heads
A_ : Tuple = mlp_ratio
A_ : str = distilled
A_ : Union[str, Any] = layer_norm_eps
A_ : str = drop_rate
A_ : int = qkv_bias
A_ : Dict = attn_drop_rate
A_ : List[Any] = drop_path_rate
A_ : Any = output_aa_attentions
        A_ : Union[str, Any] = initializer_range
| 667 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["ViTFeatureExtractor"]
UpperCamelCase = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 66 |
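# _LazyModule above defers the heavy torch/TF/Flax imports until an attribute is
# actually accessed, so `import transformers` stays fast. A minimal sketch of
# the same idea using only the standard library:
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, submodules):
        super().__init__(name)
        self._submodules = submodules  # attribute name -> importable module path

    def __getattr__(self, attr):  # only called when the attribute is missing
        module = importlib.import_module(self._submodules[attr])
        setattr(self, attr, module)  # cache so later lookups skip __getattr__
        return module

lazy = LazyModule("demo", {"json": "json"})
print(lazy.json.dumps({"lazy": True}))  # "json" is imported only at this line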
'''simple docstring'''
import math
from collections.abc import Callable
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : float = xa
A_ : float = xa
while True:
if x_n == x_na or function(lowerCamelCase__ ) == function(lowerCamelCase__ ):
raise ZeroDivisionError("""float division by zero, could not find root""" )
A_ : float = x_na - (
function(lowerCamelCase__ ) / ((function(lowerCamelCase__ ) - function(lowerCamelCase__ )) / (x_na - x_n))
)
if abs(x_na - x_na ) < 10**-5:
return x_na
A_ : Tuple = x_na
A_ : List[Any] = x_na
def a ( lowerCamelCase__ ):
'''simple docstring'''
return math.pow(lowerCamelCase__ , 3 ) - (2 * x) - 5
if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
| 667 | 0 |
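# The same secant iteration with readable names - the sample's placeholder
# identifiers hide which iterate is which (note its convergence test even
# compares a value with itself). Stops when successive iterates agree to 1e-5.
import math

def secant(f, x0, x1):
    while True:
        if x1 == x0 or f(x1) == f(x0):
            raise ZeroDivisionError("float division by zero, could not find root")
        x2 = x1 - f(x1) / ((f(x1) - f(x0)) / (x1 - x0))  # secant update
        if abs(x2 - x1) < 10**-5:
            return x2
        x0, x1 = x1, x2

print(secant(lambda x: math.pow(x, 3) - 2 * x - 5, 3, 3.5))  # ~2.0946, root of x^3 - 2x - 5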
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str , snake_case__ :Dict , snake_case__ :List[str] ) -> List[Any]:
# Initialise PyTorch model
_lowercase = BertConfig.from_json_file(snake_case__ )
print(F"""Building PyTorch model from configuration: {config}""" )
_lowercase = BertForPreTraining(snake_case__ )
# Load weights from tf checkpoint
load_tf_weights_in_bert(snake_case__ , snake_case__ , snake_case__ )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , snake_case__ )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
snake_case = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path) | 67 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors) | 667 | 0 |
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    """Compute (a ** n) % mod using O(log n) multiplications."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod
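# Worked example: 13 = 0b1101, so a**13 = a**8 * a**4 * a**1, and every
# intermediate product can be reduced mod m to stay small. Sanity check
# against Python's built-in three-argument pow():
assert binary_exponentiation(3, 13, 7) == pow(3, 13, 7) == 3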
# a prime number
p = 701
a = 1000000000
b = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
| 68 |
"""Greedy selection of items under a weight budget, ordered by a chosen key."""


class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight
def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu
def greedy(item, max_cost, key_func):
    # Consider items in decreasing order of the chosen key (e.g. value/weight).
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
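# Usage sketch with a hypothetical three-item menu and a weight budget of 60,
# keyed on the value-to-weight ratio:
def demo_greedy() -> None:
    menu = build_menu(["burger", "salad", "steak"], [80, 30, 100], [40, 10, 30])
    chosen, total_value = greedy(menu, 60, Things.value_weight)
    print(chosen, total_value)  # picks steak (100/30) and salad (30/10), total value 130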
def test_greedy():
    """Doctest placeholder kept for doctest discovery."""


if __name__ == "__main__":
    import doctest

    doctest.testmod() | 667 | 0 |
"""Bellman-Ford shortest paths with negative-cycle detection."""
from __future__ import annotations


def print_distance(distance: list[float], src: int) -> None:
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")
def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False
def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0
    # Relax every edge vertex_count - 1 times; after that, any further
    # improvement can only come from a negative cycle.
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")
    return distance
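# Worked example on a hypothetical 3-vertex graph: edges 0->1 (w=4),
# 0->2 (w=5) and 1->2 (w=-2). From vertex 0 the shortest distances are
# [0, 4, 2], since the path 0->1->2 costs 4 - 2 = 2 < 5.
def demo_bellman_ford() -> None:
    edges = [
        {"src": 0, "dst": 1, "weight": 4},
        {"src": 0, "dst": 2, "weight": 5},
        {"src": 1, "dst": 2, "weight": -2},
    ]
    assert bellman_ford(edges, 3, len(edges), 0) == [0.0, 4.0, 2.0]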
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| 69 |
"""Helper functions for training models with the pytorch-quantization toolkit."""
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)
name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def add_arguments(parser):
    """Add arguments to parser for functions defined in quant_trainer."""
    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
    group.add_argument(
        "--recalibrate-weights",
        action="store_true",
        help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ),
    )
def set_default_quantizers(args):
    """Set default quantizers before creating the model."""
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")
    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)
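# Typical wiring sketch (assumes an argparse-based training script; the flag
# names come from add_arguments above, and the argument values here are just
# examples). set_default_quantizers must run before the model is built:
#   parser = argparse.ArgumentParser()
#   add_arguments(parser)
#   args = parser.parse_args(["--aprec", "8", "--wprec", "8"])
#   set_default_quantizers(args)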
def configure_model(model, args, calib=False, eval=False):
    """Function called before the training loop."""
    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)

    if args.clip_gelu:
        clip_gelu(model, args.clip_gelu)

    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model)
def enable_calibration(model):
    """Enable calibration of all *_quantizer modules in model."""
    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")
def load_calib_amax(model, args):
    """Load calibrated amax and re-enable quantization."""
    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model)
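# Calibration flow sketch (calib_dataloader is a hypothetical data loader):
# collect activation ranges with a few forward passes, then freeze the
# collected amax values and re-enable quantization.
#   enable_calibration(model)
#   for batch in calib_dataloader:
#       model(**batch)
#   load_calib_amax(model, args)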
def fuse_qkv(model, args):
    """Force the Q/K/V scale factors to match by taking the max of the three amaxes."""

    def fuse3(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()
        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)
def clip_gelu(model, maxval):
    """Clip activations generated by GELU to maxval when quantized."""
    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")
def expand_amax(model):
    """Expand per-tensor amax to be per channel, where each channel is assigned the per-tensor amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")
def recalibrate_weights(model):
    """Perform max calibration on the weights and update amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod._weight_quantizer, "_amax"):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax
def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Print a quantizer summary for every quantized layer in the model."""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]
    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))
    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        s = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f"{' ':{name_width}} {wgt_str}")
def print_quant_summary(model):
    """Print a summary of all TensorQuantizer modules in the model."""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")
def set_quantizer(name, mod, quantizer, k, v):
    """Set an attribute on mod's quantizer submodule."""
    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="both" , **lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = f'Warning: changing {which} quantizers of {name:{qname_width}}'
for k, v in kwargs.items():
s += f' {k}={v}'
if which in ["input", "both"]:
set_quantizer(lowerCamelCase__ , lowerCamelCase__ , """_input_quantizer""" , lowerCamelCase__ , lowerCamelCase__ )
if which in ["weight", "both"]:
set_quantizer(lowerCamelCase__ , lowerCamelCase__ , """_weight_quantizer""" , lowerCamelCase__ , lowerCamelCase__ )
logger.info(lowerCamelCase__ )
def set_quantizer_by_name(model, names, **kwargs):
    """Set quantizer attributes for layers whose name matches a pattern in names."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s) | 667 | 0 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
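# These checks are plain pytest-style test functions; a typical run (the path
# follows the conventional repository layout and is shown as an example):
#   python -m pytest digital_image_processing/test_digital_image_processing.py -q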
| 70 |
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class AutoTokenizerTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
@slow
def _a (self ):
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(lowercase ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
A_ : Tuple = AutoTokenizer.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(lowercase ) , 0 )
def _a (self ):
A_ : str = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def _a (self ):
A_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def _a (self ):
A_ : int = AutoConfig.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
# Check that tokenizer_type ≠ model_type
A_ : int = AutoTokenizer.from_pretrained(lowercase , config=lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def _a (self ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase , """vocab.txt""" ) )
A_ : Optional[Any] = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""bert""" , use_fast=lowercase )
self.assertIsInstance(lowercase , lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase , """merges.txt""" ) )
A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""gpt2""" , use_fast=lowercase )
self.assertIsInstance(lowercase , lowercase )
@require_tokenizers
def _a (self ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase , """vocab.txt""" ) )
A_ : Any = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""bert""" )
self.assertIsInstance(lowercase , lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase , """merges.txt""" ) )
A_ : int = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""gpt2""" )
self.assertIsInstance(lowercase , lowercase )
def _a (self ):
with pytest.raises(lowercase ):
AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" )
@require_tokenizers
def _a (self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
A_ : str = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
if isinstance(lowercase , lowercase ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , lowercase )
else:
self.assertEqual(tokenizer.do_lower_case , lowercase )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def _a (self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
lowercase , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ):
A_ : int = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" )
def _a (self ):
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
A_ : List[str] = TOKENIZER_MAPPING.values()
A_ : Optional[Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(lowercase )
@require_tokenizers
def _a (self ):
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=lowercase ) , lowercase )
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , lowercase )
@require_tokenizers
def _a (self ):
A_ : str = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=lowercase )
A_ : List[Any] = """Hello, world. How are you?"""
A_ : List[Any] = tokenizer.tokenize(lowercase )
self.assertEqual("""[UNK]""" , tokens[0] )
A_ : Dict = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=lowercase )
A_ : List[Any] = tokenizer.tokenize(lowercase )
self.assertEqual("""[UNK]""" , tokens[0] )
@require_tokenizers
def _a (self ):
A_ : Optional[int] = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" )
self.assertEqual(type(lowercase ) , lowercase )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 30000 )
self.assertEqual(tokenizer.unk_token , """[UNK]""" )
self.assertEqual(tokenizer.padding_side , """right""" )
self.assertEqual(tokenizer.truncation_side , """right""" )
def _a (self ):
A_ : Any = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : Tuple = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def _a (self ):
A_ : Union[str, Any] = AutoTokenizer.from_pretrained("""ctrl""" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(lowercase , lowercase )
def _a (self ):
# Check we can load the tokenizer config of an online model.
A_ : Tuple = get_tokenizer_config("""bert-base-cased""" )
A_ : Any = config.pop("""_commit_hash""" , lowercase )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(lowercase , {"""do_lower_case""": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
A_ : List[Any] = get_tokenizer_config(lowercase )
self.assertDictEqual(lowercase , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
A_ : int = AutoTokenizer.from_pretrained(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : Dict = get_tokenizer_config(lowercase )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" )
def _a (self ):
try:
AutoConfig.register("""custom""" , lowercase )
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase ):
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
A_ : Tuple = CustomTokenizer.from_pretrained(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : List[str] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def _a (self ):
try:
AutoConfig.register("""custom""" , lowercase )
# Can register in two steps
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
lowercase , slow_tokenizer_class=lowercase , fast_tokenizer_class=lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase ):
AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
A_ : str = BertTokenizerFast.from_pretrained(lowercase )
bert_tokenizer.save_pretrained(lowercase )
A_ : Optional[Any] = CustomTokenizerFast.from_pretrained(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : List[str] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase , use_fast=lowercase )
self.assertIsInstance(lowercase , lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _a (self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase ):
A_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase ):
A_ : Any = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
A_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : int = AutoTokenizer.from_pretrained(lowercase , trust_remote_code=lowercase )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
A_ : str = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : Any = AutoTokenizer.from_pretrained(lowercase , trust_remote_code=lowercase , use_fast=lowercase )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
@require_tokenizers
def _a (self ):
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewTokenizerFast(BertTokenizerFast):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False
try:
AutoConfig.register("""custom""" , lowercase )
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
# If remote code is not set, the default is to use local
A_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
A_ : int = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
A_ : int = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
A_ : List[Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
A_ : Any = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertTrue(tokenizer.special_attribute_present )
A_ : Union[str, Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _a (self ):
A_ : Dict = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
A_ : Optional[int] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def _a (self ):
with self.assertRaisesRegex(
lowercase , """bert-base is not a local folder and is not a valid model identifier""" ):
A_ : List[str] = AutoTokenizer.from_pretrained("""bert-base""" )
def _a (self ):
with self.assertRaisesRegex(
lowercase , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
A_ : Tuple = AutoTokenizer.from_pretrained(lowercase , revision="""aaaaaa""" )
def _a (self ):
# Make sure we have cached the tokenizer.
A_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
A_ : Tuple = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 ) | 667 | 0 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)
@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
    sample: jnp.ndarray


@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]
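    # Usage sketch (a minimal init flow under the standard Flax convention;
    # the 32-pixel sample size is an arbitrary example value):
    #   unet = FlaxUNet2DConditionModel(sample_size=32)
    #   params = unet.init_weights(jax.random.PRNGKey(0))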
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
            )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype,
                )
            down_blocks.append(down_block)
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=block_out_channels[-1], dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
        )

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(reversed_block_out_channels) - 1)]
            is_final_block = i == len(block_out_channels) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlock2D(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, num_attention_heads=reversed_num_attention_heads[i], add_upsample=not is_final_block, dropout=self.dropout, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
                )
            else:
                up_block = FlaxUpBlock2D(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, add_upsample=not is_final_block, dropout=self.dropout, dtype=self.dtype,
                )
            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )
    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        down_block_additional_residuals=None,
        mid_block_additional_residual=None,
        return_dict: bool = True,
        train: bool = False,
    ):
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)
        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()
            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals
            ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)
            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlock2D):
                sample = up_block(
                    sample, temb=t_emb, encoder_hidden_states=encoder_hidden_states, res_hidden_states_tuple=res_samples, deterministic=not train,
                )
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))

        if not return_dict:
            return (sample,)

        return FlaxUNet2DConditionOutput(sample=sample)
| 71 |
"""Split a byte budget into contiguous 1-indexed ranges, one per partition."""
from __future__ import annotations


def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list
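# Worked example: allocation_num(16647, 4) returns
# ['1-4161', '4162-8322', '8323-12483', '12484-16647']; 16647 // 4 = 4161
# bytes per partition, and the final partition absorbs the remainder.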
if __name__ == "__main__":
    import doctest

    doctest.testmod() | 667 | 0 |
"""Pure-Python SHA-1 implementation (for study; prefer hashlib in practice)."""
import argparse
import hashlib  # hashlib is only used inside the test function
import struct


class SHA1Hash:
    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash() -> None:
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324
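# Quick check against the published SHA-1 test vector for "abc":
#   >>> SHA1Hash(b"abc").final_hash()
#   'a9993e364706816aba3e25717850c26c9cd0d89d'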
def main() -> None:
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
| 72 |
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm1.weight', f'encoder.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm1.bias', f'encoder.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.weight', f'encoder.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.bias', f'encoder.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm2.weight', f'encoder.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm2.bias', f'encoder.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.weight', f'encoder.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.bias', f'encoder.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc2.weight', f'encoder.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.mlp.fc2.bias', f'encoder.encoder.layer.{i}.output.dense.bias') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
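# The fused qkv projection has shape (3 * hidden_size, hidden_size); the three
# equal row-slices above become the separate query, key and value weights.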
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Union[str, Any] = ViTConfig(image_size=3_84 , qkv_bias=lowerCamelCase__ )
A_ : int = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
A_ : List[str] = 7_68
elif "large" in checkpoint_url:
# use ViT-large encoder
A_ : Union[str, Any] = 10_24
A_ : List[Any] = 40_96
A_ : Dict = 24
A_ : List[str] = 16
A_ : Union[str, Any] = 10_24
else:
raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" )
    # the large-printed + stage1 checkpoints use sinusoidal position embeddings and no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
A_ : Optional[Any] = False
A_ : Union[str, Any] = """relu"""
A_ : List[str] = 10_24
A_ : Tuple = True
A_ : Tuple = False
A_ : List[str] = False
# load HuggingFace model
A_ : Optional[int] = ViTModel(lowerCamelCase__ , add_pooling_layer=lowerCamelCase__ )
A_ : Dict = TrOCRForCausalLM(lowerCamelCase__ )
A_ : Dict = VisionEncoderDecoderModel(encoder=lowerCamelCase__ , decoder=lowerCamelCase__ )
model.eval()
# load state_dict of original model, rename some keys
A_ : int = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location="""cpu""" , check_hash=lowerCamelCase__ )["""model"""]
A_ : int = create_rename_keys(lowerCamelCase__ , lowerCamelCase__ )
for src, dest in rename_keys:
rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
read_in_q_k_v(lowerCamelCase__ , lowerCamelCase__ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
A_ : Union[str, Any] = state_dict.pop(lowerCamelCase__ )
if key.startswith("""decoder""" ) and "output_projection" not in key:
A_ : str = val
else:
A_ : List[str] = val
# load state dict
model.load_state_dict(lowerCamelCase__ )
# Check outputs on an image
A_ : str = ViTImageProcessor(size=encoder_config.image_size )
A_ : Union[str, Any] = RobertaTokenizer.from_pretrained("""roberta-large""" )
A_ : Tuple = TrOCRProcessor(lowerCamelCase__ , lowerCamelCase__ )
A_ : Dict = processor(images=prepare_img(lowerCamelCase__ ) , return_tensors="""pt""" ).pixel_values
# verify logits
A_ : Optional[Any] = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
A_ : Union[str, Any] = model(pixel_values=lowerCamelCase__ , decoder_input_ids=lowerCamelCase__ )
A_ : Dict = outputs.logits
A_ : str = torch.Size([1, 1, 5_02_65] )
if "trocr-base-handwritten" in checkpoint_url:
A_ : Optional[int] = torch.tensor(
[-1.4_502, -4.6_683, -0.5_347, -2.9_291, 9.1_435, -3.0_571, 8.9_764, 1.7_560, 8.7_358, -1.5_311] )
elif "trocr-large-handwritten" in checkpoint_url:
A_ : Any = torch.tensor(
[-2.6_437, -1.3_129, -2.2_596, -5.3_455, 6.3_539, 1.7_604, 5.4_991, 1.4_702, 5.6_113, 2.0_170] )
elif "trocr-base-printed" in checkpoint_url:
A_ : List[Any] = torch.tensor(
[-5.6_816, -5.8_388, 1.1_398, -6.9_034, 6.8_505, -2.4_393, 1.2_284, -1.0_232, -1.9_661, -3.9_210] )
elif "trocr-large-printed" in checkpoint_url:
A_ : Optional[Any] = torch.tensor(
[-6.0_162, -7.0_959, 4.4_155, -5.1_063, 7.0_468, -3.1_631, 2.6_466, -0.3_081, -0.8_106, -1.7_535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , lowerCamelCase__ , atol=1E-3 ), "First elements of logits not as expected"
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCamelCase__ )
print(f'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
lowerCamelCase :Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
lowerCamelCase :Optional[int] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 667 | 0 |
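# Once converted, the folder saved by the script above can be used for OCR
# roughly as follows (a sketch: the folder path and image file are placeholders):
from PIL import Image
from transformers import TrOCRProcessor, VisionEncoderDecoderModel

processor = TrOCRProcessor.from_pretrained("path/to/pytorch_dump_folder")
model = VisionEncoderDecoderModel.from_pretrained("path/to/pytorch_dump_folder")
image = Image.open("text_line.png").convert("RGB")
pixel_values = processor(images=image, return_tensors="pt").pixel_values
generated_ids = model.generate(pixel_values)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])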
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class _snake_case ( unittest.TestCase , A__ ):
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE = load_tool('text-classification')
self.tool.setup()
SCREAMING_SNAKE_CASE = load_tool('text-classification' , remote=a)
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = self.tool('That\'s quite cool' , ['positive', 'negative'])
self.assertEqual(a , 'positive')
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
SCREAMING_SNAKE_CASE = self.remote_tool('That\'s quite cool' , ['positive', 'negative'])
self.assertEqual(a , 'positive')
def SCREAMING_SNAKE_CASE__ ( self) -> int:
SCREAMING_SNAKE_CASE = self.tool(text='That\'s quite cool' , labels=['positive', 'negative'])
self.assertEqual(a , 'positive')
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE = self.remote_tool(text='That\'s quite cool' , labels=['positive', 'negative'])
self.assertEqual(a , 'positive')
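# Stand-alone usage of the tool under test (a sketch: the first call downloads
# the default text-classification checkpoint, so network access is assumed):
from transformers import load_tool

classifier = load_tool("text-classification")
print(classifier("That's quite cool", labels=["positive", "negative"]))  # -> "positive"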
| 73 |
'''simple docstring'''
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))''')) | 667 | 0 |
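# The one-liner above is a quine: %r re-renders the template string with its
# quotes and escapes, so substituting the template into itself reproduces the
# source exactly. The same idea in a two-line form (a sketch, not from the row above):
s = 's = %r\nprint(s %% s)'
print(s % s)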
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class __UpperCamelCase :
"""simple docstring"""
def __init__( self : Optional[int] , _A : List[str] , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = parent
__SCREAMING_SNAKE_CASE : int = 13
__SCREAMING_SNAKE_CASE : Any = 7
__SCREAMING_SNAKE_CASE : List[str] = 30
__SCREAMING_SNAKE_CASE : List[Any] = self.seq_length + self.mem_len
__SCREAMING_SNAKE_CASE : Union[str, Any] = 15
__SCREAMING_SNAKE_CASE : Any = True
__SCREAMING_SNAKE_CASE : Dict = True
__SCREAMING_SNAKE_CASE : Union[str, Any] = 99
__SCREAMING_SNAKE_CASE : Optional[Any] = [10, 50, 80]
__SCREAMING_SNAKE_CASE : int = 32
__SCREAMING_SNAKE_CASE : int = 32
__SCREAMING_SNAKE_CASE : Dict = 4
__SCREAMING_SNAKE_CASE : Union[str, Any] = 8
__SCREAMING_SNAKE_CASE : Union[str, Any] = 128
__SCREAMING_SNAKE_CASE : Optional[int] = 2
__SCREAMING_SNAKE_CASE : Optional[Any] = 2
__SCREAMING_SNAKE_CASE : Optional[int] = None
__SCREAMING_SNAKE_CASE : Dict = 1
__SCREAMING_SNAKE_CASE : Union[str, Any] = 0
__SCREAMING_SNAKE_CASE : int = 3
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.vocab_size - 1
__SCREAMING_SNAKE_CASE : Optional[Any] = 0.01
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE : List[Any] = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE : List[str] = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
random.seed(self.seed )
tf.random.set_seed(self.seed )
def UpperCAmelCase__ ( self : List[str] , _A : Dict , _A : str , _A : Any , _A : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = TFTransfoXLModel(_A )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = model(_A ).to_tuple()
__SCREAMING_SNAKE_CASE : Tuple = {'''input_ids''': input_ids_a, '''mems''': mems_a}
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = model(_A ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def UpperCAmelCase__ ( self : Dict , _A : int , _A : List[str] , _A : int , _A : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = TFTransfoXLLMHeadModel(_A )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = model(_A ).to_tuple()
__SCREAMING_SNAKE_CASE : Any = {'''input_ids''': input_ids_a, '''labels''': lm_labels}
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = model(_A ).to_tuple()
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = model([input_ids_a, mems_a] ).to_tuple()
__SCREAMING_SNAKE_CASE : int = {'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels}
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[str] = model(_A ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def UpperCAmelCase__ ( self : Dict , _A : Tuple , _A : int , _A : Dict , _A : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = TFTransfoXLForSequenceClassification(_A )
__SCREAMING_SNAKE_CASE : int = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = self.prepare_config_and_inputs()
((__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE)) : Optional[Any] = config_and_inputs
__SCREAMING_SNAKE_CASE : List[Any] = {'''input_ids''': input_ids_a}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase_ = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
lowerCAmelCase_ = () if is_tf_available() else ()
lowerCAmelCase_ = (
{
'''feature-extraction''': TFTransfoXLModel,
'''text-classification''': TFTransfoXLForSequenceClassification,
'''text-generation''': TFTransfoXLLMHeadModel,
'''zero-shot''': TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def UpperCAmelCase__ ( self : Dict , _A : Union[str, Any] , _A : List[Any] , _A : Optional[Any] , _A : Optional[Any] , _A : Union[str, Any] ):
"""simple docstring"""
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = TFTransfoXLModelTester(self )
__SCREAMING_SNAKE_CASE : List[Any] = ConfigTester(self , config_class=_A , d_embed=37 )
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
self.model_tester.set_seed()
__SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*_A )
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
self.model_tester.set_seed()
__SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*_A )
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*_A )
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE : Optional[int] = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : Dict = model_class(_A )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
__SCREAMING_SNAKE_CASE : List[Any] = model.get_output_embeddings()
assert isinstance(_A , tf.keras.layers.Layer )
__SCREAMING_SNAKE_CASE : Any = model.get_bias()
assert name is None
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = model.get_output_embeddings()
assert x is None
__SCREAMING_SNAKE_CASE : Dict = model.get_bias()
assert name is None
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
pass
@slow
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE : Optional[Any] = TFTransfoXLModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' )
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
pass
@require_tf
class __UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip('''Skip test until #12651 is resolved.''' )
@slow
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' )
# fmt: off
__SCREAMING_SNAKE_CASE : Union[str, Any] = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
__SCREAMING_SNAKE_CASE : List[Any] = [33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0,33,1,1857,2,1,1009,4,1109,1_1739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
__SCREAMING_SNAKE_CASE : Optional[int] = model.generate(_A , max_length=200 , do_sample=_A )
self.assertListEqual(output_ids[0].numpy().tolist() , _A )
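# Usage sketch of the segment-level recurrence the tests above exercise: feed
# one segment, then pass the returned `mems` with the next segment so attention
# can look back across the boundary (the tiny config mirrors the tester values;
# this assumes TensorFlow is installed and builds the model from scratch):
import tensorflow as _tf_sketch
from transformers import TransfoXLConfig as _Cfg, TFTransfoXLModel as _Model

_tiny_config = _Cfg(
    vocab_size=99, mem_len=30, clamp_len=15, cutoffs=[10, 50, 80],
    d_model=32, d_embed=32, n_head=4, d_head=8, d_inner=128, div_val=2, n_layer=2,
)
_tiny_model = _Model(_tiny_config)
_segment_a = _tf_sketch.random.uniform((1, 7), maxval=99, dtype=_tf_sketch.int32)
_segment_b = _tf_sketch.random.uniform((1, 7), maxval=99, dtype=_tf_sketch.int32)
_out_a = _tiny_model(_segment_a)                      # first segment: fresh memory
_out_b = _tiny_model(_segment_b, mems=_out_a.mems)    # reuses the cached hidden states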
| 74 |
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
lowerCamelCase :List[str] = imread(R'''digital_image_processing/image_data/lena_small.jpg''')
lowerCamelCase :Optional[int] = cvtColor(img, COLOR_BGR2GRAY)
def a ( ):
'''simple docstring'''
A_ : List[Any] = cn.convert_to_negative(lowerCamelCase__ )
# assert negative_img array for at least one True
assert negative_img.any()
def a ( ):
'''simple docstring'''
with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img:
# Work around assertion for response
assert str(cc.change_contrast(lowerCamelCase__ , 1_10 ) ).startswith(
"""<PIL.Image.Image image mode=RGB size=100x100 at""" )
def a ( ):
'''simple docstring'''
A_ : int = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def a ( ):
'''simple docstring'''
A_ : int = imread("""digital_image_processing/image_data/lena_small.jpg""" , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
A_ : List[Any] = canny.canny(lowerCamelCase__ )
# assert canny array for at least one True
assert canny_array.any()
def a ( ):
'''simple docstring'''
assert gg.gaussian_filter(lowerCamelCase__ , 5 , sigma=0.9 ).all()
def a ( ):
'''simple docstring'''
A_ : int = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
A_ : Optional[Any] = conv.img_convolve(lowerCamelCase__ , lowerCamelCase__ ).astype(lowerCamelCase__ )
assert res.any()
def a ( ):
'''simple docstring'''
assert med.median_filter(lowerCamelCase__ , 3 ).any()
def a ( ):
'''simple docstring'''
A_, A_ : int = sob.sobel_filter(lowerCamelCase__ )
assert grad.any() and theta.any()
def a ( ):
'''simple docstring'''
A_ : int = sp.make_sepia(lowerCamelCase__ , 20 )
assert sepia.all()
def a ( lowerCamelCase__ = "digital_image_processing/image_data/lena_small.jpg" ):
'''simple docstring'''
A_ : Any = bs.Burkes(imread(lowerCamelCase__ , 1 ) , 1_20 )
burkes.process()
assert burkes.output_img.any()
def a ( lowerCamelCase__ = "digital_image_processing/image_data/lena_small.jpg" , ):
'''simple docstring'''
A_ : Union[str, Any] = rs.NearestNeighbour(imread(lowerCamelCase__ , 1 ) , 4_00 , 2_00 )
nn.process()
assert nn.output.any()
def a ( ):
'''simple docstring'''
A_ : int = """digital_image_processing/image_data/lena.jpg"""
# Reading the image and converting it to grayscale.
A_ : Union[str, Any] = imread(lowerCamelCase__ , 0 )
# Test for get_neighbors_pixel function() return not None
A_ : str = 0
A_ : str = 0
A_ : Dict = image[x_coordinate][y_coordinate]
A_ : Optional[Any] = lbp.get_neighbors_pixel(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
A_ : str = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
A_ : Any = lbp.local_binary_value(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
assert lbp_image.any() | 667 | 0 |
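# The last test above recomputes a local binary pattern per pixel. A compact
# stand-alone version of that per-pixel rule (the neighbour order is a
# convention; any fixed clockwise order yields a valid 8-bit LBP code):
import numpy as _np_sketch

def lbp_code(gray, x, y):
    # 8-neighbourhood offsets, clockwise from the top-left corner
    offsets = [(-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1)]
    center = gray[x, y]
    code = 0
    for bit, (dx, dy) in enumerate(offsets):
        nx, ny = x + dx, y + dy
        inside = 0 <= nx < gray.shape[0] and 0 <= ny < gray.shape[1]
        if inside and gray[nx, ny] >= center:
            code |= 1 << bit
    return code

assert lbp_code(_np_sketch.array([[9, 9, 9], [0, 5, 0], [0, 0, 0]]), 1, 1) == 0b00000111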
'''simple docstring'''
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase_ :
def __init__( self : Tuple , _A : Any , _A : List[str]=13 , _A : Optional[int]=[30, 30] , _A : List[str]=2 , _A : Union[str, Any]=3 , _A : Union[str, Any]=True , _A : Optional[Any]=True , _A : Tuple=32 , _A : Optional[Any]=5 , _A : List[Any]=4 , _A : Any=37 , _A : List[str]="gelu" , _A : Tuple=0.1 , _A : str=0.1 , _A : Tuple=10 , _A : List[Any]=0.0_2 , _A : Any=3 , _A : Optional[int]=None , _A : Tuple=8 , _A : Optional[Any]=10 , ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = parent
UpperCAmelCase__ : Dict = batch_size
UpperCAmelCase__ : str = image_size
UpperCAmelCase__ : List[Any] = patch_size
UpperCAmelCase__ : str = num_channels
UpperCAmelCase__ : Any = is_training
UpperCAmelCase__ : Optional[int] = use_labels
UpperCAmelCase__ : str = hidden_size
UpperCAmelCase__ : Optional[Any] = num_hidden_layers
UpperCAmelCase__ : Dict = num_attention_heads
UpperCAmelCase__ : List[str] = intermediate_size
UpperCAmelCase__ : List[str] = hidden_act
UpperCAmelCase__ : str = hidden_dropout_prob
UpperCAmelCase__ : List[str] = attention_probs_dropout_prob
UpperCAmelCase__ : Optional[Any] = type_sequence_label_size
UpperCAmelCase__ : List[str] = initializer_range
UpperCAmelCase__ : str = num_labels
UpperCAmelCase__ : List[str] = scope
UpperCAmelCase__ : Union[str, Any] = n_targets
UpperCAmelCase__ : int = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
UpperCAmelCase__ : Any = (image_size[1] // patch_size) * (image_size[0] // patch_size)
UpperCAmelCase__ : Tuple = num_patches + 1 + self.num_detection_tokens
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
UpperCAmelCase__ : Optional[int] = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
UpperCAmelCase__ : List[Any] = []
for i in range(self.batch_size ):
UpperCAmelCase__ : str = {}
UpperCAmelCase__ : Optional[Any] = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=_A )
UpperCAmelCase__ : Union[str, Any] = torch.rand(self.n_targets , 4 , device=_A )
labels.append(_A )
UpperCAmelCase__ : List[Any] = self.get_config()
return config, pixel_values, labels
def lowercase_ ( self : Tuple ):
'''simple docstring'''
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def lowercase_ ( self : Union[str, Any] , _A : int , _A : List[str] , _A : int ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = YolosModel(config=_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : List[Any] = model(_A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def lowercase_ ( self : List[Any] , _A : Union[str, Any] , _A : Dict , _A : Dict ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = YolosForObjectDetection(_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : Optional[Any] = model(pixel_values=_A )
UpperCAmelCase__ : Dict = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
UpperCAmelCase__ : Dict = model(pixel_values=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : int = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = config_and_inputs
UpperCAmelCase__ : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
lowerCAmelCase__ = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
lowerCAmelCase__ = (
{'feature-extraction': YolosModel, 'object-detection': YolosForObjectDetection} if is_torch_available() else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowercase_ ( self : Union[str, Any] , _A : List[str] , _A : Union[str, Any] , _A : Any=False ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
UpperCAmelCase__ : int = []
for i in range(self.model_tester.batch_size ):
UpperCAmelCase__ : str = {}
UpperCAmelCase__ : str = torch.ones(
size=(self.model_tester.n_targets,) , device=_A , dtype=torch.long )
UpperCAmelCase__ : str = torch.ones(
self.model_tester.n_targets , 4 , device=_A , dtype=torch.float )
labels.append(_A )
UpperCAmelCase__ : str = labels
return inputs_dict
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = YolosModelTester(self )
UpperCAmelCase__ : Optional[int] = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
pass
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Any = model_class(_A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A , nn.Linear ) )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Dict = model_class(_A )
UpperCAmelCase__ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : List[str] = [*signature.parameters.keys()]
UpperCAmelCase__ : str = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _A )
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : List[Any] = True
# in YOLOS, the seq_len is different
UpperCAmelCase__ : Union[str, Any] = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
UpperCAmelCase__ : Any = True
UpperCAmelCase__ : Any = False
UpperCAmelCase__ : List[Any] = True
UpperCAmelCase__ : Union[str, Any] = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(**self._prepare_for_class(_A , _A ) )
UpperCAmelCase__ : Any = outputs.attentions
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase__ : Union[str, Any] = True
UpperCAmelCase__ : int = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
UpperCAmelCase__ : Any = model(**self._prepare_for_class(_A , _A ) )
UpperCAmelCase__ : str = outputs.attentions
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
UpperCAmelCase__ : str = len(_A )
# Check attention is always last and order is fine
UpperCAmelCase__ : Optional[int] = True
UpperCAmelCase__ : List[Any] = True
UpperCAmelCase__ : Optional[int] = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
UpperCAmelCase__ : List[str] = model(**self._prepare_for_class(_A , _A ) )
UpperCAmelCase__ : Any = 1
self.assertEqual(out_len + added_hidden_states , len(_A ) )
UpperCAmelCase__ : List[str] = outputs.attentions
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
def check_hidden_states_output(_A : Optional[int] , _A : int , _A : List[str] ):
UpperCAmelCase__ : Union[str, Any] = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
UpperCAmelCase__ : str = model(**self._prepare_for_class(_A , _A ) )
UpperCAmelCase__ : Optional[int] = outputs.hidden_states
UpperCAmelCase__ : List[str] = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_A ) , _A )
# YOLOS has a different seq_length
UpperCAmelCase__ : Optional[Any] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = True
check_hidden_states_output(_A , _A , _A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ : Any = True
check_hidden_states_output(_A , _A , _A )
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*_A )
@slow
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : str = YolosModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def a__ ( ) -> Dict:
UpperCAmelCase__ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
@cached_property
def lowercase_ ( self : List[str] ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None
@slow
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(_A )
UpperCAmelCase__ : Optional[Any] = self.default_image_processor
UpperCAmelCase__ : Dict = prepare_img()
UpperCAmelCase__ : Dict = image_processor(images=_A , return_tensors='''pt''' ).to(_A )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Union[str, Any] = model(inputs.pixel_values )
# verify outputs
UpperCAmelCase__ : Any = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , _A )
UpperCAmelCase__ : Dict = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] , device=_A , )
UpperCAmelCase__ : Optional[Any] = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=_A )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _A , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , _A , atol=1e-4 ) )
# verify postprocessing
UpperCAmelCase__ : Any = image_processor.post_process_object_detection(
_A , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
UpperCAmelCase__ : str = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(_A )
UpperCAmelCase__ : Any = [75, 75, 17, 63, 17]
UpperCAmelCase__ : int = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5] ).to(_A )
self.assertEqual(len(results['''scores'''] ) , 5 )
self.assertTrue(torch.allclose(results['''scores'''] , _A , atol=1e-4 ) )
self.assertSequenceEqual(results['''labels'''].tolist() , _A )
self.assertTrue(torch.allclose(results['''boxes'''][0, :] , _A ) )
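# Token bookkeeping used throughout the tests above: the expected sequence is
# patch tokens + one [CLS] token + the learned detection tokens (tester defaults):
_image_size, _patch_size, _num_detection_tokens = (30, 30), 2, 10
_num_patches = (_image_size[1] // _patch_size) * (_image_size[0] // _patch_size)
assert _num_patches + 1 + _num_detection_tokens == 236   # 225 + 1 + 10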
| 75 |
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
lowerCamelCase :Dict = get_logger(__name__)
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase=None ):
A_ : Optional[int] = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith("""__""" ):
setattr(self , lowercase , getattr(lowercase , lowercase ) )
A_ : List[Any] = module._original_module if isinstance(lowercase , _PatchedModuleObj ) else module
class _lowerCAmelCase :
__SCREAMING_SNAKE_CASE : Dict = []
def __init__(self , lowercase , lowercase , lowercase , lowercase=None ):
A_ : Union[str, Any] = obj
A_ : Optional[int] = target
A_ : Optional[Any] = new
A_ : Optional[Any] = target.split(""".""" )[0]
A_ : Tuple = {}
A_ : Optional[int] = attrs or []
def __enter__(self ):
*A_, A_ : Optional[Any] = self.target.split(""".""" )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(lowercase ) ):
try:
A_ : Any = import_module(""".""".join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
A_ : int = getattr(self.obj , lowercase )
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(lowercase , _PatchedModuleObj ) and obj_attr._original_module is submodule)
):
A_ : str = obj_attr
# patch at top level
setattr(self.obj , lowercase , _PatchedModuleObj(lowercase , attrs=self.attrs ) )
A_ : Optional[Any] = getattr(self.obj , lowercase )
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(lowercase , lowercase , _PatchedModuleObj(getattr(lowercase , lowercase , lowercase ) , attrs=self.attrs ) )
A_ : Dict = getattr(lowercase , lowercase )
# finally set the target attribute
setattr(lowercase , lowercase , self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
A_ : Optional[Any] = getattr(import_module(""".""".join(lowercase ) ) , lowercase )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , lowercase ) is attr_value:
A_ : Dict = getattr(self.obj , lowercase )
setattr(self.obj , lowercase , self.new )
        elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
A_ : int = globals()["""__builtins__"""][target_attr]
setattr(self.obj , lowercase , self.new )
else:
raise RuntimeError(F'Tried to patch attribute {target_attr} instead of a submodule.' )
def __exit__(self , *lowercase ):
for attr in list(self.original ):
setattr(self.obj , lowercase , self.original.pop(lowercase ) )
def _a (self ):
self.__enter__()
self._active_patches.append(self )
def _a (self ):
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__() | 667 | 0 |
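# The class above generalises attribute patching to dotted submodule paths and
# re-imported names. For a single attribute the standard library offers the
# same idea; a minimal analogue with unittest.mock (stand-alone sketch):
import os
from unittest import mock

with mock.patch("os.path.join", lambda *parts: "/".join(parts)):
    assert os.path.join("a", "b") == "a/b"   # patched inside the context
assert os.path.join("a", "b") == os.sep.join(("a", "b"))  # restored on exit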
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class UpperCAmelCase_ ( snake_case ):
UpperCamelCase ="sew-d"
def __init__( self , UpperCamelCase_=32 , UpperCamelCase_=7_68 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=30_72 , UpperCamelCase_=2 , UpperCamelCase_=5_12 , UpperCamelCase_=2_56 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=("p2c", "c2p") , UpperCamelCase_="layer_norm" , UpperCamelCase_="gelu_python" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=0.0 , UpperCamelCase_=0.1 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-7 , UpperCamelCase_=1E-5 , UpperCamelCase_="group" , UpperCamelCase_="gelu" , UpperCamelCase_=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) , UpperCamelCase_=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , UpperCamelCase_=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , UpperCamelCase_=False , UpperCamelCase_=1_28 , UpperCamelCase_=16 , UpperCamelCase_=True , UpperCamelCase_=0.0_5 , UpperCamelCase_=10 , UpperCamelCase_=2 , UpperCamelCase_=0.0 , UpperCamelCase_=10 , UpperCamelCase_=0 , UpperCamelCase_="mean" , UpperCamelCase_=False , UpperCamelCase_=False , UpperCamelCase_=2_56 , UpperCamelCase_=0 , UpperCamelCase_=1 , UpperCamelCase_=2 , **UpperCamelCase_ , ) -> Tuple:
super().__init__(**UpperCamelCase_ , pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ )
__lowercase : Optional[int] = hidden_size
__lowercase : Optional[int] = feat_extract_norm
__lowercase : Dict = feat_extract_activation
__lowercase : Optional[Any] = list(UpperCamelCase_ )
__lowercase : Optional[Any] = list(UpperCamelCase_ )
__lowercase : str = list(UpperCamelCase_ )
__lowercase : List[Any] = conv_bias
__lowercase : Optional[int] = num_conv_pos_embeddings
__lowercase : Any = num_conv_pos_embedding_groups
__lowercase : str = len(self.conv_dim )
__lowercase : Union[str, Any] = num_hidden_layers
__lowercase : List[Any] = intermediate_size
__lowercase : List[str] = squeeze_factor
__lowercase : List[str] = max_position_embeddings
__lowercase : Optional[Any] = position_buckets
__lowercase : Optional[int] = share_att_key
__lowercase : Tuple = relative_attention
__lowercase : Optional[Any] = norm_rel_ebd
__lowercase : Optional[Any] = list(UpperCamelCase_ )
__lowercase : Union[str, Any] = hidden_act
__lowercase : str = num_attention_heads
__lowercase : Optional[Any] = hidden_dropout
__lowercase : Any = attention_dropout
__lowercase : List[Any] = activation_dropout
__lowercase : List[str] = feat_proj_dropout
__lowercase : Union[str, Any] = final_dropout
__lowercase : Optional[Any] = layer_norm_eps
__lowercase : Optional[Any] = feature_layer_norm_eps
__lowercase : List[Any] = initializer_range
__lowercase : int = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect.'''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
F"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
F"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowercase : Dict = apply_spec_augment
__lowercase : str = mask_time_prob
__lowercase : List[Any] = mask_time_length
__lowercase : Tuple = mask_time_min_masks
__lowercase : Union[str, Any] = mask_feature_prob
__lowercase : List[Any] = mask_feature_length
__lowercase : Any = mask_feature_min_masks
# ctc loss
__lowercase : Dict = ctc_loss_reduction
__lowercase : List[Any] = ctc_zero_infinity
# sequence classification
__lowercase : Optional[int] = use_weighted_layer_sum
__lowercase : Optional[int] = classifier_proj_size
@property
def _lowerCamelCase ( self ) -> Optional[int]:
return functools.reduce(operator.mul , self.conv_stride , 1 )
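# The property above multiplies the feature-encoder strides; with the default
# SEW-D strides the input waveform is downsampled by an overall factor of 320:
import functools as _ft_sketch
import operator as _op_sketch
_default_strides = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
assert _ft_sketch.reduce(_op_sketch.mul, _default_strides, 1) == 320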
| 76 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCamelCase :int = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Optional[int] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :List[Any] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Any = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
lowerCamelCase :Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 667 | 0 |
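# Minimal sketch of the lazy-import pattern driving the __init__ above: names
# are imported eagerly only under TYPE_CHECKING; at runtime the first attribute
# access triggers the real submodule import (all names here are illustrative,
# not the actual _LazyModule implementation):
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure   # {"submodule": ["Name", ...]}

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                value = getattr(module, attr)
                setattr(self, attr, value)          # cache for later lookups
                return value
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")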
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
"""google/vivit-b-16x2-kinetics400""": (
"""https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"""
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class a__ ( __magic_name__ ):
lowercase_ = "vivit"
def __init__( self : Union[str, Any] , UpperCamelCase_ : Dict=224 , UpperCamelCase_ : Optional[Any]=32 , UpperCamelCase_ : List[Any]=[2, 16, 16] , UpperCamelCase_ : str=3 , UpperCamelCase_ : Tuple=768 , UpperCamelCase_ : Tuple=12 , UpperCamelCase_ : Any=12 , UpperCamelCase_ : Any=3072 , UpperCamelCase_ : int="gelu_fast" , UpperCamelCase_ : Any=0.0 , UpperCamelCase_ : Tuple=0.0 , UpperCamelCase_ : Optional[int]=0.02 , UpperCamelCase_ : Tuple=1e-06 , UpperCamelCase_ : Optional[int]=True , **UpperCamelCase_ : int , ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = hidden_size
__UpperCAmelCase : Optional[Any] = num_hidden_layers
__UpperCAmelCase : Any = num_attention_heads
__UpperCAmelCase : Tuple = intermediate_size
__UpperCAmelCase : int = hidden_act
__UpperCAmelCase : Tuple = hidden_dropout_prob
__UpperCAmelCase : Dict = attention_probs_dropout_prob
__UpperCAmelCase : Optional[Any] = initializer_range
__UpperCAmelCase : List[Any] = layer_norm_eps
__UpperCAmelCase : Dict = image_size
__UpperCAmelCase : Optional[Any] = num_frames
__UpperCAmelCase : List[Any] = tubelet_size
__UpperCAmelCase : Optional[Any] = num_channels
__UpperCAmelCase : Optional[int] = qkv_bias
super().__init__(**UpperCamelCase_)
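# With the defaults above (image_size=224, num_frames=32, tubelet_size=[2, 16, 16])
# a clip is cut into (32 // 2) * (224 // 16) * (224 // 16) = 3136 tubelet tokens:
_num_frames, _image_size = 32, 224
_t, _h, _w = 2, 16, 16
assert (_num_frames // _t) * (_image_size // _h) * (_image_size // _w) == 3136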
| 77 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , lowercase , lowercase , lowercase , lowercase = None , ):
super().__init__()
self.register_modules(transformer=lowercase , vae=lowercase , scheduler=lowercase )
        # create an imagenet -> id dictionary for easier use
A_ : str = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(""",""" ):
A_ : Optional[Any] = int(lowercase )
A_ : List[Any] = dict(sorted(self.labels.items() ) )
def _a (self , lowercase ):
if not isinstance(lowercase , lowercase ):
A_ : Optional[int] = list(lowercase )
for l in label:
if l not in self.labels:
raise ValueError(
F'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.' )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__(self , lowercase , lowercase = 4.0 , lowercase = None , lowercase = 50 , lowercase = "pil" , lowercase = True , ):
A_ : Tuple = len(lowercase )
A_ : Optional[Any] = self.transformer.config.sample_size
A_ : int = self.transformer.config.in_channels
A_ : Optional[int] = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowercase , device=self.device , dtype=self.transformer.dtype , )
A_ : Optional[Any] = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
A_ : Optional[int] = torch.tensor(lowercase , device=self.device ).reshape(-1 )
A_ : Optional[int] = torch.tensor([1000] * batch_size , device=self.device )
A_ : Optional[Any] = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(lowercase )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
A_ : List[Any] = latent_model_input[: len(lowercase ) // 2]
A_ : List[str] = torch.cat([half, half] , dim=0 )
A_ : Any = self.scheduler.scale_model_input(lowercase , lowercase )
A_ : Tuple = t
if not torch.is_tensor(lowercase ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
A_ : Optional[Any] = latent_model_input.device.type == """mps"""
if isinstance(lowercase , lowercase ):
A_ : Optional[Any] = torch.floataa if is_mps else torch.floataa
else:
A_ : List[Any] = torch.intaa if is_mps else torch.intaa
A_ : List[Any] = torch.tensor([timesteps] , dtype=lowercase , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
A_ : List[Any] = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
A_ : int = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
A_ : List[Any] = self.transformer(
lowercase , timestep=lowercase , class_labels=lowercase ).sample
# perform guidance
if guidance_scale > 1:
A_, A_ : Any = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
A_, A_ : List[Any] = torch.split(lowercase , len(lowercase ) // 2 , dim=0 )
A_ : Optional[Any] = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
A_ : str = torch.cat([half_eps, half_eps] , dim=0 )
A_ : Optional[int] = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
A_, A_ : int = torch.split(lowercase , lowercase , dim=1 )
else:
A_ : Optional[int] = noise_pred
# compute previous image: x_t -> x_t-1
A_ : Union[str, Any] = self.scheduler.step(lowercase , lowercase , lowercase ).prev_sample
if guidance_scale > 1:
A_, A_ : int = latent_model_input.chunk(2 , dim=0 )
else:
A_ : Union[str, Any] = latent_model_input
A_ : Union[str, Any] = 1 / self.vae.config.scaling_factor * latents
A_ : List[Any] = self.vae.decode(lowercase ).sample
A_ : List[str] = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A_ : Union[str, Any] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
A_ : int = self.numpy_to_pil(lowercase )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=lowercase ) | 667 | 0 |
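# Classifier-free guidance as applied in the denoising loop above: the batch
# carries a [conditional, null-class] pair, and the guided noise estimate is
# eps_uncond + scale * (eps_cond - eps_uncond). Stand-alone sketch:
import torch as _torch_cfg
_cond_eps = _torch_cfg.randn(2, 4)
_uncond_eps = _torch_cfg.randn(2, 4)
_guidance_scale = 4.0
_guided_eps = _uncond_eps + _guidance_scale * (_cond_eps - _uncond_eps)
assert _guided_eps.shape == _cond_eps.shape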
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE_: List[str] =get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class __A ( UpperCamelCase__ , unittest.TestCase ):
a__ : str = ReformerTokenizer
a__ : Optional[Any] = ReformerTokenizerFast
a__ : Any = True
a__ : Union[str, Any] = False
a__ : int = True
def _lowercase (self : List[str] ):
super().setUp()
UpperCAmelCase_ = ReformerTokenizer(__a , keep_accents=__a )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase (self : int ):
UpperCAmelCase_ = "<s>"
UpperCAmelCase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(__a ) , 1000 )
def _lowercase (self : str ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def _lowercase (self : Dict ):
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = "I was born in 92000, and this is falsé."
UpperCAmelCase_ = tokenizer.tokenize(__a )
UpperCAmelCase_ = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = tokenizer.encode(__a , add_special_tokens=__a )
UpperCAmelCase_ = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = tokenizer.encode(__a )
UpperCAmelCase_ = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
def _lowercase (self : Union[str, Any] , __a : List[str]=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(__a , **__a )
# Simple input
UpperCAmelCase_ = "This is a simple input"
UpperCAmelCase_ = ["This is a simple input 1", "This is a simple input 2"]
UpperCAmelCase_ = ("This is a simple input", "This is a pair")
UpperCAmelCase_ = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="max_length" )
# Simple input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="max_length" )
# Simple input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="max_length" , )
# Pair input
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="max_length" )
# Pair input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="max_length" )
# Pair input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="max_length" , )
def _lowercase (self : Optional[Any] ):
pass
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = ReformerTokenizer(__a , keep_accents=__a )
UpperCAmelCase_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(__a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [285, 46, 10, 170, 382] , )
UpperCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(
__a , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def _lowercase (self : Optional[int] ):
return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment" )
@slow
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = "Hello World!"
UpperCAmelCase_ = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(__a , self.big_tokenizer.encode(__a ) )
@slow
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
UpperCAmelCase_ = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(__a , self.big_tokenizer.encode(__a ) )
@require_torch
@slow
def _lowercase (self : Tuple ):
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
UpperCAmelCase_ = list(self.big_tokenizer.get_vocab().keys() )[:10]
UpperCAmelCase_ = " ".join(__a )
UpperCAmelCase_ = self.big_tokenizer.encode_plus(__a , return_tensors="pt" )
UpperCAmelCase_ = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="pt" )
UpperCAmelCase_ = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
UpperCAmelCase_ = encoded_sequence["input_ids"].shape
UpperCAmelCase_ = ReformerModel(__a )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__a )
model(**__a )
@slow
def _lowercase (self : Dict ):
# fmt: off
UpperCAmelCase_ = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
UpperCAmelCase_ = [
"This is a very simple sentence.",
"The quick brown fox jumps over the lazy dog.",
]
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="google/reformer-crime-and-punishment" , revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a" , padding=__a , sequences=__a , )
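# A minimal round-trip sketch (not part of the test class above); the checkpoint
# is the one `big_tokenizer` already uses, and the expected ids are the ones the
# slow test above asserts. Decoding should reproduce the input text modulo
# whitespace normalisation.
from transformers import ReformerTokenizer

tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
ids = tok.encode("Hello World!")
print(ids)              # [126, 32, 262, 152, 38, 72, 287] per the test above
print(tok.decode(ids))  # "Hello World!"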
| 78 |
'''simple docstring'''
import math
BALLS_PER_COLOUR = 1_0
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
def solution(num_picked: int = 20) -> str:
    '''simple docstring'''
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f'{result:.9f}'
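# Why the closed form above works (a short derivation, not in the original
# snippet): by linearity of expectation, E[#distinct colours drawn] equals
# NUM_COLOURS * P(a fixed colour appears at least once), and
# P(a fixed colour is missing) = C(NUM_BALLS - BALLS_PER_COLOUR, n) / C(NUM_BALLS, n),
# which is exactly missing_colour / total computed above.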
if __name__ == "__main__":
print(solution(2_0)) | 667 | 0 |
def prefix_function(input_string: str) -> list:
    '''simple docstring'''
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result
def longest_prefix(input_string: str) -> int:
    '''simple docstring'''
    return max(prefix_function(input_string))
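# Worked example (illustrative): for "aabcdaabc" the failure function is
# [0, 1, 0, 0, 0, 1, 2, 3, 4] -- the final 4 says the 4-char prefix "aabc"
# is also a suffix, so longest_prefix returns 4.
assert prefix_function("aabcdaabc") == [0, 1, 0, 0, 0, 1, 2, 3, 4]
assert longest_prefix("aabcdaabc") == 4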
if __name__ == "__main__":
import doctest
doctest.testmod()
| 79 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase :List[Any] = logging.get_logger(__name__)
lowerCamelCase :Union[str, Any] = {
'''google/pix2struct-textcaps-base''': (
'''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'''
),
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'pix2struct_text_model'
__SCREAMING_SNAKE_CASE : Optional[int] = ['past_key_values']
__SCREAMING_SNAKE_CASE : List[Any] = {
'hidden_size': 'hidden_size',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__(self , lowercase=50244 , lowercase=768 , lowercase=64 , lowercase=2048 , lowercase=12 , lowercase=12 , lowercase=32 , lowercase=128 , lowercase=0.1 , lowercase=1E-6 , lowercase=1.0 , lowercase="gelu_new" , lowercase=0 , lowercase=False , lowercase=0 , lowercase=1 , lowercase=False , lowercase=True , **lowercase , ):
A_ : Tuple = vocab_size
A_ : str = hidden_size
A_ : Optional[Any] = d_kv
A_ : Tuple = d_ff
A_ : str = num_layers
A_ : int = num_heads
A_ : Dict = relative_attention_num_buckets
A_ : Optional[Any] = relative_attention_max_distance
A_ : Dict = dropout_rate
A_ : Optional[int] = layer_norm_epsilon
A_ : Dict = initializer_factor
A_ : Any = use_cache
A_ : int = eos_token_id
A_ : Tuple = decoder_start_token_id
# for backwards compatibility
A_ : str = dense_act_fn
super().__init__(
pad_token_id=lowercase , eos_token_id=lowercase , decoder_start_token_id=lowercase , tie_word_embeddings=lowercase , is_decoder=lowercase , **lowercase , )
@classmethod
def _a (cls , lowercase , **lowercase ):
cls._set_token_in_kwargs(lowercase )
A_, A_ : List[str] = cls.get_config_dict(lowercase , **lowercase )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
A_ : int = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(lowercase , **lowercase )
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : int = 'pix2struct_vision_model'
def __init__(self , lowercase=768 , lowercase=768 , lowercase=2048 , lowercase=64 , lowercase=12 , lowercase=12 , lowercase="gelu_new" , lowercase=1E-6 , lowercase=0.0 , lowercase=0.0 , lowercase=1E-10 , lowercase=1.0 , lowercase=4096 , lowercase=32 , lowercase=128 , **lowercase , ):
super().__init__(**lowercase )
A_ : List[str] = hidden_size
A_ : Optional[int] = patch_embed_hidden_size
A_ : Any = d_ff
A_ : str = dropout_rate
A_ : Dict = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : List[Any] = initializer_range
A_ : List[str] = initializer_factor
A_ : Dict = attention_dropout
A_ : Optional[Any] = layer_norm_eps
A_ : Optional[Any] = dense_act_fn
A_ : List[Any] = seq_len
A_ : Tuple = relative_attention_num_buckets
A_ : Any = relative_attention_max_distance
A_ : int = d_kv
@classmethod
def _a (cls , lowercase , **lowercase ):
cls._set_token_in_kwargs(lowercase )
A_, A_ : List[Any] = cls.get_config_dict(lowercase , **lowercase )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
A_ : Tuple = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(lowercase , **lowercase )
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Any = 'pix2struct'
__SCREAMING_SNAKE_CASE : List[Any] = True
def __init__(self , lowercase=None , lowercase=None , lowercase=1.0 , lowercase=0.02 , lowercase=False , lowercase=False , lowercase=True , **lowercase , ):
super().__init__(tie_word_embeddings=lowercase , is_encoder_decoder=lowercase , **lowercase )
if text_config is None:
A_ : Optional[Any] = {}
logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""" )
if vision_config is None:
A_ : Tuple = {}
logger.info("""vision_config is None. Initializing the Pix2StructVisionConfig with default values.""" )
A_ : Tuple = PixaStructTextConfig(**lowercase )
A_ : List[str] = PixaStructVisionConfig(**lowercase )
A_ : Dict = self.text_config.decoder_start_token_id
A_ : Union[str, Any] = self.text_config.pad_token_id
A_ : str = self.text_config.eos_token_id
A_ : List[str] = initializer_factor
A_ : int = initializer_range
A_ : Tuple = self.initializer_range
A_ : Tuple = self.initializer_range
A_ : List[str] = is_vqa
@classmethod
def _a (cls , lowercase , lowercase , **lowercase ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase )
def _a (self ):
A_ : Optional[Any] = copy.deepcopy(self.__dict__ )
A_ : str = self.text_config.to_dict()
A_ : List[Any] = self.vision_config.to_dict()
A_ : List[str] = self.__class__.model_type
return output | 667 | 0 |
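# Hedged sketch of composing the composite config from its two sub-configs,
# mirroring the classmethod defined above (upstream: `from_text_vision_configs`).
# `PixaStructConfig` is a stand-in name for the composite class, which the
# obfuscated snippet calls `_lowerCAmelCase`; the layer counts are illustrative.
text_config = PixaStructTextConfig(num_layers=2, num_heads=2)
vision_config = PixaStructVisionConfig(num_hidden_layers=2, num_attention_heads=2)
composite = PixaStructConfig(
    text_config=text_config.to_dict(), vision_config=vision_config.to_dict()
)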
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__UpperCamelCase : List[str] = logging.getLogger(__name__)
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
return (preds == labels).mean()
@dataclass
class __UpperCamelCase :
__snake_case :str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class __UpperCamelCase :
__snake_case :str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys() )} )
__snake_case :str = field(metadata={'help': 'Should contain the data files for the task.'} )
__snake_case :int = field(
default=1_2_8 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def snake_case ( ):
'''simple docstring'''
__lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__lowercase , __lowercase , __lowercase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowerCamelCase )
# Set seed
set_seed(training_args.seed )
try:
__lowercase = processors[data_args.task_name]()
__lowercase = processor.get_labels()
__lowercase = len(lowerCamelCase )
except KeyError:
raise ValueError("""Task not found: %s""" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowercase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
__lowercase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__lowercase = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , )
# Get datasets
__lowercase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=lowerCamelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
__lowercase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=lowerCamelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(lowerCamelCase ) -> Dict:
__lowercase = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(lowerCamelCase , p.label_ids )}
# Data collator
__lowercase = DataCollatorWithPadding(lowerCamelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
__lowercase = Trainer(
model=lowerCamelCase , args=lowerCamelCase , train_dataset=lowerCamelCase , eval_dataset=lowerCamelCase , compute_metrics=lowerCamelCase , data_collator=lowerCamelCase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__lowercase = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__lowercase = trainer.evaluate()
__lowercase = os.path.join(training_args.output_dir , """eval_results.txt""" )
if trainer.is_world_master():
with open(lowerCamelCase , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(""" %s = %s""" , lowerCamelCase , lowerCamelCase )
writer.write("""%s = %s\n""" % (key, value) )
results.update(lowerCamelCase )
return results
def snake_case ( lowerCamelCase ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
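# Minimal sketch of the dataclass + HfArgumentParser pattern the script above is
# built on; the field names here are illustrative, not the script's own.
#
#   @dataclass
#   class DemoArguments:
#       task_name: str = field(metadata={"help": "Task to run."})
#       max_seq_length: int = field(default=128, metadata={"help": "Max input length."})
#
#   (demo_args,) = HfArgumentParser(DemoArguments).parse_args_into_dataclasses(
#       args=["--task_name", "swag"]
#   )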
| 80 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
lowerCamelCase :Union[str, Any] = {
'''configuration_audio_spectrogram_transformer''': [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ASTConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :int = [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ASTForAudioClassification''',
'''ASTModel''',
'''ASTPreTrainedModel''',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Tuple = ['''ASTFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
lowerCamelCase :Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 667 | 0 |
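# Behavioural sketch of what the _LazyModule wiring above buys a caller (assumes
# torch is installed): the heavy modeling module is only imported on first
# attribute access, so importing the package itself stays cheap.
from transformers.models.audio_spectrogram_transformer import ASTConfig

config = ASTConfig()
print(config.model_type)  # "audio-spectrogram-transformer"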
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case : Any = logging.get_logger(__name__)
def lowerCAmelCase_ ( __lowerCamelCase ):
__snake_case : List[str] = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
__snake_case : int = 1_9_2
__snake_case : List[Any] = 7_6_8
__snake_case : Union[str, Any] = 1_2
__snake_case : Optional[Any] = 3
__snake_case : Any = [8_0_0, 1_3_3_3]
__snake_case : List[str] = False
elif yolos_name == "yolos_s_dWr":
__snake_case : List[str] = 3_3_0
__snake_case : Dict = 1_4
__snake_case : List[str] = 6
__snake_case : Union[str, Any] = 1_3_2_0
elif "yolos_s" in yolos_name:
__snake_case : Union[str, Any] = 3_8_4
__snake_case : Dict = 1_5_3_6
__snake_case : int = 1_2
__snake_case : Optional[Any] = 6
elif "yolos_b" in yolos_name:
__snake_case : Union[str, Any] = [8_0_0, 1_3_4_4]
__snake_case : Optional[int] = 9_1
__snake_case : Any = "huggingface/label-files"
__snake_case : Optional[int] = "coco-detection-id2label.json"
__snake_case : Optional[Any] = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type="dataset" ) , "r" ) )
__snake_case : Dict = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
__snake_case : str = idalabel
__snake_case : int = {v: k for k, v in idalabel.items()}
return config
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = False ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__snake_case : Any = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
__snake_case : int = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
__snake_case : Any = in_proj_weight[: config.hidden_size, :]
__snake_case : Dict = in_proj_bias[: config.hidden_size]
__snake_case : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__snake_case : Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__snake_case : List[Any] = in_proj_weight[-config.hidden_size :, :]
__snake_case : Union[str, Any] = in_proj_bias[-config.hidden_size :]
def lowerCAmelCase_ ( __lowerCamelCase ):
if "backbone" in name:
__snake_case : Any = name.replace("backbone" , "vit" )
if "cls_token" in name:
__snake_case : Any = name.replace("cls_token" , "embeddings.cls_token" )
if "det_token" in name:
__snake_case : Optional[Any] = name.replace("det_token" , "embeddings.detection_tokens" )
if "mid_pos_embed" in name:
__snake_case : Optional[int] = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
if "pos_embed" in name:
__snake_case : Optional[Any] = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
__snake_case : List[str] = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "blocks" in name:
__snake_case : Dict = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
__snake_case : Union[str, Any] = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
__snake_case : Any = name.replace("attn" , "attention.self" )
if "norm1" in name:
__snake_case : Union[str, Any] = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
__snake_case : List[str] = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
__snake_case : int = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
__snake_case : List[str] = name.replace("mlp.fc2" , "output.dense" )
if "class_embed" in name:
__snake_case : Any = name.replace("class_embed" , "class_labels_classifier" )
if "bbox_embed" in name:
__snake_case : List[Any] = name.replace("bbox_embed" , "bbox_predictor" )
if "vit.norm" in name:
__snake_case : Union[str, Any] = name.replace("vit.norm" , "vit.layernorm" )
return name
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ):
for key in orig_state_dict.copy().keys():
__snake_case : str = orig_state_dict.pop(__lowerCamelCase )
if "qkv" in key:
__snake_case : Tuple = key.split("." )
__snake_case : List[Any] = int(key_split[2] )
__snake_case : Tuple = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
__snake_case : Optional[Any] = val[:dim, :]
__snake_case : Union[str, Any] = val[
dim : dim * 2, :
]
__snake_case : List[Any] = val[-dim:, :]
else:
__snake_case : Tuple = val[:dim]
__snake_case : Optional[int] = val[dim : dim * 2]
__snake_case : Tuple = val[-dim:]
else:
__snake_case : Tuple = val
return orig_state_dict
def lowerCAmelCase_ ( ):
__snake_case : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg"
__snake_case : List[str] = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw )
return im
@torch.no_grad()
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = False ):
__snake_case : str = get_yolos_config(__lowerCamelCase )
# load original state_dict
__snake_case : int = torch.load(__lowerCamelCase , map_location="cpu" )["model"]
# load 🤗 model
__snake_case : str = YolosForObjectDetection(__lowerCamelCase )
model.eval()
__snake_case : Dict = convert_state_dict(__lowerCamelCase , __lowerCamelCase )
model.load_state_dict(__lowerCamelCase )
# Check outputs on an image, prepared by YolosImageProcessor
__snake_case : Dict = 8_0_0 if yolos_name != "yolos_ti" else 5_1_2
__snake_case : str = YolosImageProcessor(format="coco_detection" , size=__lowerCamelCase )
__snake_case : List[Any] = image_processor(images=prepare_img() , return_tensors="pt" )
__snake_case : Optional[Any] = model(**__lowerCamelCase )
__snake_case , __snake_case : Tuple = outputs.logits, outputs.pred_boxes
__snake_case , __snake_case : Tuple = None, None
if yolos_name == "yolos_ti":
__snake_case : List[str] = torch.tensor(
[[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] )
__snake_case : str = torch.tensor(
[[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] )
elif yolos_name == "yolos_s_200_pre":
__snake_case : List[str] = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] )
__snake_case : Union[str, Any] = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] )
elif yolos_name == "yolos_s_300_pre":
__snake_case : Tuple = torch.tensor(
[[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] )
__snake_case : Dict = torch.tensor(
[[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] )
elif yolos_name == "yolos_s_dWr":
__snake_case : Optional[int] = torch.tensor(
[[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] )
__snake_case : Dict = torch.tensor(
[[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] )
elif yolos_name == "yolos_base":
__snake_case : List[str] = torch.tensor(
[[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] )
__snake_case : Optional[Any] = torch.tensor(
[[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] )
else:
raise ValueError(F'Unknown yolos_name: {yolos_name}' )
assert torch.allclose(logits[0, :3, :3] , __lowerCamelCase , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , __lowerCamelCase , atol=1e-4 )
Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
print(F'Saving model {yolos_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__lowerCamelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(__lowerCamelCase )
if push_to_hub:
__snake_case : Dict = {
"yolos_ti": "yolos-tiny",
"yolos_s_200_pre": "yolos-small",
"yolos_s_300_pre": "yolos-small-300",
"yolos_s_dWr": "yolos-small-dwr",
"yolos_base": "yolos-base",
}
print("Pushing to the hub..." )
__snake_case : Optional[Any] = model_mapping[yolos_name]
image_processor.push_to_hub(__lowerCamelCase , organization="hustvl" )
model.push_to_hub(__lowerCamelCase , organization="hustvl" )
if __name__ == "__main__":
_snake_case : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_snake_case : str = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
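    # Example invocation (the script filename and paths are placeholders; the
    # flags are the ones defined above):
    #
    #   python convert_yolos_to_pytorch.py \
    #       --yolos_name yolos_s_200_pre \
    #       --checkpoint_path /path/to/yolos_s_200_pre.pth \
    #       --pytorch_dump_folder_path /path/to/output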
| 81 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : List[str] = ['image_processor', 'tokenizer']
__SCREAMING_SNAKE_CASE : Any = 'LayoutLMv3ImageProcessor'
__SCREAMING_SNAKE_CASE : Any = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__(self , lowercase=None , lowercase=None , **lowercase ):
A_ : Dict = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , lowercase , )
A_ : List[str] = kwargs.pop("""feature_extractor""" )
A_ : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(lowercase , lowercase )
def __call__(self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = True , lowercase = False , lowercase = None , lowercase = None , lowercase = 0 , lowercase = None , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = True , lowercase = None , **lowercase , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"""You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.""" )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"""You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" )
# first, apply the image processor
A_ : Optional[int] = self.image_processor(images=lowercase , return_tensors=lowercase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(lowercase , lowercase ):
A_ : Union[str, Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
A_ : Dict = features["""words"""]
A_ : Optional[int] = self.tokenizer(
text=text if text is not None else features["""words"""] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["""boxes"""] , word_labels=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_token_type_ids=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , )
# add pixel values
A_ : List[Any] = features.pop("""pixel_values""" )
if return_overflowing_tokens is True:
A_ : List[str] = self.get_overflowing_images(lowercase , encoded_inputs["""overflow_to_sample_mapping"""] )
A_ : Optional[int] = images
return encoded_inputs
def _a (self , lowercase , lowercase ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
A_ : str = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(lowercase ) != len(lowercase ):
raise ValueError(
"""Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"""
F' {len(lowercase )} and {len(lowercase )}' )
return images_with_overflow
def _a (self , *lowercase , **lowercase ):
return self.tokenizer.batch_decode(*lowercase , **lowercase )
def _a (self , *lowercase , **lowercase ):
return self.tokenizer.decode(*lowercase , **lowercase )
@property
def _a (self ):
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def _a (self ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowercase , )
return self.image_processor_class
@property
def _a (self ):
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , lowercase , )
return self.image_processor | 667 | 0 |
"""simple docstring"""
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    return sum(c * (x**i) for i, c in enumerate(poly))
def horner(poly: Sequence[float], x: float) -> float:
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
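# Horner's rule evaluates c0 + c1*x + ... + cn*x**n as
# (...((cn*x + c(n-1))*x + ...)*x + c0), using one multiply-add per coefficient
# instead of recomputing powers of x. Quick illustrative check:
assert evaluate_poly((2.0, 3.0), 5.0) == horner((2.0, 3.0), 5.0) == 17.0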
if __name__ == "__main__":
lowerCamelCase = (0.0, 0.0, 5.0, 9.3, 7.0)
lowerCamelCase = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 82 |
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
lowerCamelCase :Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(__UpperCAmelCase )
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , **lowercase ):
super().__init__(**lowercase )
requires_backends(self , """vision""" )
requires_backends(self , """torch""" )
if self.framework != "pt":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
self.check_model_type(lowercase )
def _a (self , **lowercase ):
A_ : str = {}
A_ : Dict = {}
A_ : str = {}
# preprocess args
if "points_per_batch" in kwargs:
A_ : Dict = kwargs["""points_per_batch"""]
if "points_per_crop" in kwargs:
A_ : int = kwargs["""points_per_crop"""]
if "crops_n_layers" in kwargs:
A_ : str = kwargs["""crops_n_layers"""]
if "crop_overlap_ratio" in kwargs:
A_ : int = kwargs["""crop_overlap_ratio"""]
if "crop_n_points_downscale_factor" in kwargs:
A_ : Tuple = kwargs["""crop_n_points_downscale_factor"""]
# postprocess args
if "pred_iou_thresh" in kwargs:
A_ : Any = kwargs["""pred_iou_thresh"""]
if "stability_score_offset" in kwargs:
A_ : Optional[int] = kwargs["""stability_score_offset"""]
if "mask_threshold" in kwargs:
A_ : Union[str, Any] = kwargs["""mask_threshold"""]
if "stability_score_thresh" in kwargs:
A_ : List[str] = kwargs["""stability_score_thresh"""]
if "crops_nms_thresh" in kwargs:
A_ : Union[str, Any] = kwargs["""crops_nms_thresh"""]
if "output_rle_mask" in kwargs:
A_ : List[Any] = kwargs["""output_rle_mask"""]
if "output_bboxes_mask" in kwargs:
A_ : Union[str, Any] = kwargs["""output_bboxes_mask"""]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__(self , lowercase , *lowercase , lowercase=None , lowercase=None , **lowercase ):
return super().__call__(lowercase , *lowercase , num_workers=lowercase , batch_size=lowercase , **lowercase )
def _a (self , lowercase , lowercase=64 , lowercase = 0 , lowercase = 512 / 1500 , lowercase = 32 , lowercase = 1 , ):
A_ : Tuple = load_image(lowercase )
A_ : int = self.image_processor.size["""longest_edge"""]
A_, A_, A_, A_ : str = self.image_processor.generate_crop_boxes(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
A_ : Dict = self.image_processor(images=lowercase , return_tensors="""pt""" )
with self.device_placement():
if self.framework == "pt":
A_ : Optional[Any] = self.get_inference_context()
with inference_context():
A_ : str = self._ensure_tensor_on_device(lowercase , device=self.device )
A_ : Tuple = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
A_ : Tuple = image_embeddings
A_ : Dict = grid_points.shape[1]
A_ : Optional[Any] = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"""Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
"""To return all points at once, set points_per_batch to None""" )
for i in range(0 , lowercase , lowercase ):
A_ : Tuple = grid_points[:, i : i + points_per_batch, :, :]
A_ : List[Any] = input_labels[:, i : i + points_per_batch]
A_ : Optional[Any] = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def _a (self , lowercase , lowercase=0.88 , lowercase=0.95 , lowercase=0 , lowercase=1 , ):
A_ : Any = model_inputs.pop("""input_boxes""" )
A_ : str = model_inputs.pop("""is_last""" )
A_ : int = model_inputs.pop("""original_sizes""" ).tolist()
A_ : int = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
A_ : List[str] = self.model(**lowercase )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
A_ : Optional[int] = model_outputs["""pred_masks"""]
A_ : Tuple = self.image_processor.post_process_masks(
lowercase , lowercase , lowercase , lowercase , binarize=lowercase )
A_ : Union[str, Any] = model_outputs["""iou_scores"""]
A_, A_, A_ : Tuple = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , lowercase , lowercase , lowercase , lowercase , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def _a (self , lowercase , lowercase=False , lowercase=False , lowercase=0.7 , ):
A_ : Tuple = []
A_ : Optional[Any] = []
A_ : str = []
for model_output in model_outputs:
all_scores.append(model_output.pop("""iou_scores""" ) )
all_masks.extend(model_output.pop("""masks""" ) )
all_boxes.append(model_output.pop("""boxes""" ) )
A_ : Any = torch.cat(lowercase )
A_ : List[Any] = torch.cat(lowercase )
A_, A_, A_, A_ : Optional[int] = self.image_processor.post_process_for_mask_generation(
lowercase , lowercase , lowercase , lowercase )
A_ : int = defaultdict(lowercase )
for output in model_outputs:
for k, v in output.items():
extra[k].append(lowercase )
A_ : Optional[int] = {}
if output_rle_mask:
A_ : List[str] = rle_mask
if output_bboxes_mask:
A_ : Optional[int] = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra} | 667 | 0 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class __snake_case ( _lowercase):
snake_case__ : torch.FloatTensor
snake_case__ : torch.FloatTensor
class __snake_case ( _lowercase , _lowercase):
snake_case__ : int = 1
@register_to_config
def __init__( self : str , __lowerCAmelCase : int = 2_0_0_0 , __lowerCAmelCase : float = 0.15 , __lowerCAmelCase : float = 0.01 , __lowerCAmelCase : float = 13_48.0 , __lowerCAmelCase : float = 1E-5 , __lowerCAmelCase : int = 1 , ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = sigma_max
# setable values
_lowerCamelCase : Dict = None
self.set_sigmas(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : Optional[int] = None ):
"""simple docstring"""
return sample
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : float = None , __lowerCAmelCase : Union[str, torch.device] = None ):
"""simple docstring"""
_lowerCamelCase : Tuple = sampling_eps if sampling_eps is not None else self.config.sampling_eps
_lowerCamelCase : Optional[int] = torch.linspace(1 , __lowerCAmelCase , __lowerCAmelCase , device=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : float = None , __lowerCAmelCase : float = None , __lowerCAmelCase : float = None ):
"""simple docstring"""
_lowerCamelCase : List[str] = sigma_min if sigma_min is not None else self.config.sigma_min
_lowerCamelCase : int = sigma_max if sigma_max is not None else self.config.sigma_max
_lowerCamelCase : Any = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : List[Any] = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
_lowerCamelCase : Optional[int] = torch.exp(torch.linspace(math.log(__lowerCAmelCase ) , math.log(__lowerCAmelCase ) , __lowerCAmelCase ) )
_lowerCamelCase : Tuple = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : int , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : Optional[torch.Generator] = None , __lowerCAmelCase : bool = True , ):
"""simple docstring"""
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
_lowerCamelCase : Tuple = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
_lowerCamelCase : Dict = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
_lowerCamelCase : Optional[int] = timesteps.to(self.discrete_sigmas.device )
_lowerCamelCase : Any = self.discrete_sigmas[timesteps].to(sample.device )
_lowerCamelCase : int = self.get_adjacent_sigma(__lowerCAmelCase , __lowerCAmelCase ).to(sample.device )
_lowerCamelCase : Any = torch.zeros_like(__lowerCAmelCase )
_lowerCamelCase : Any = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
_lowerCamelCase : Union[str, Any] = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
_lowerCamelCase : List[Any] = diffusion.unsqueeze(-1 )
_lowerCamelCase : int = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
_lowerCamelCase : List[str] = randn_tensor(
sample.shape , layout=sample.layout , generator=__lowerCAmelCase , device=sample.device , dtype=sample.dtype )
_lowerCamelCase : List[Any] = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
_lowerCamelCase : int = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=__lowerCAmelCase , prev_sample_mean=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : Optional[torch.Generator] = None , __lowerCAmelCase : bool = True , ):
"""simple docstring"""
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
_lowerCamelCase : Union[str, Any] = randn_tensor(sample.shape , layout=sample.layout , generator=__lowerCAmelCase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
_lowerCamelCase : Union[str, Any] = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
_lowerCamelCase : Tuple = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
_lowerCamelCase : str = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
_lowerCamelCase : Tuple = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
_lowerCamelCase : Union[str, Any] = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
_lowerCamelCase : str = step_size.unsqueeze(-1 )
_lowerCamelCase : Any = sample + step_size * model_output
_lowerCamelCase : int = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : torch.FloatTensor , ):
"""simple docstring"""
_lowerCamelCase : Dict = timesteps.to(original_samples.device )
_lowerCamelCase : Union[str, Any] = self.discrete_sigmas.to(original_samples.device )[timesteps]
_lowerCamelCase : Union[str, Any] = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(__lowerCAmelCase ) * sigmas[:, None, None, None]
)
_lowerCamelCase : int = noise + original_samples
return noisy_samples
def __len__( self : Optional[int] ):
"""simple docstring"""
return self.config.num_train_timesteps
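# Numeric sketch of the geometric noise schedule `set_sigmas` builds above:
# sigma(t) = sigma_min * (sigma_max / sigma_min) ** t with t running from 1 down
# to sampling_eps, so the first entry equals sigma_max and the last is ~sigma_min.
# (Relies on the `torch` import at the top of this file.)
sigma_min, sigma_max = 0.01, 1348.0
t = torch.linspace(1, 1e-5, 5)
print(sigma_min * (sigma_max / sigma_min) ** t)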
| 83 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def heun_method(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    '''simple docstring'''
    # Heun's (improved Euler) predictor-corrector scheme; the parameter order is
    # reconstructed from how the names are used in the body below.
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size
    return y
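# Worked check (illustrative, using the reconstructed signature above): integrating
# y' = y from x = 0 to x = 1 with y(0) = 1 should approach e ~= 2.71828, e.g.
# heun_method(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)[-1]; Heun's method is second
# order, so halving step_size cuts the error roughly fourfold.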
if __name__ == "__main__":
import doctest
doctest.testmod() | 667 | 0 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
UpperCAmelCase = '''true'''
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=82 , __SCREAMING_SNAKE_CASE=16 ):
set_seed(42 )
lowercase = RegressionModel()
lowercase = deepcopy(__SCREAMING_SNAKE_CASE )
lowercase = RegressionDataset(length=__SCREAMING_SNAKE_CASE )
lowercase = DataLoader(__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE )
model.to(accelerator.device )
lowercase , lowercase = accelerator.prepare(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return model, ddp_model, dataloader
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ):
lowercase = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' )
lowercase = load_dataset('glue' , 'mrpc' , split='validation' )
def tokenize_function(__SCREAMING_SNAKE_CASE ):
lowercase = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE )
return outputs
with accelerator.main_process_first():
lowercase = dataset.map(
__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE , remove_columns=['idx', 'sentence1', 'sentence2'] , )
lowercase = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(__SCREAMING_SNAKE_CASE ):
if use_longest:
return tokenizer.pad(__SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='pt' )
return tokenizer.pad(__SCREAMING_SNAKE_CASE , padding='max_length' , max_length=128 , return_tensors='pt' )
return DataLoader(__SCREAMING_SNAKE_CASE , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=16 )
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase = Accelerator(dispatch_batches=__SCREAMING_SNAKE_CASE , split_batches=__SCREAMING_SNAKE_CASE )
lowercase = get_dataloader(__SCREAMING_SNAKE_CASE , not dispatch_batches )
lowercase = AutoModelForSequenceClassification.from_pretrained(
'hf-internal-testing/mrpc-bert-base-cased' , return_dict=__SCREAMING_SNAKE_CASE )
lowercase , lowercase = accelerator.prepare(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase = []
for batch in dataloader:
lowercase , lowercase = batch.values()
with torch.no_grad():
lowercase = model(__SCREAMING_SNAKE_CASE )
lowercase , lowercase = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
lowercase , lowercase = [], []
for logit, targ in logits_and_targets:
logits.append(__SCREAMING_SNAKE_CASE )
targs.append(__SCREAMING_SNAKE_CASE )
lowercase , lowercase = torch.cat(__SCREAMING_SNAKE_CASE ), torch.cat(__SCREAMING_SNAKE_CASE )
return logits, targs
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=82 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=16 ):
lowercase , lowercase , lowercase = get_basic_setup(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase , lowercase = generate_predictions(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert (
len(__SCREAMING_SNAKE_CASE ) == num_samples
), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(__SCREAMING_SNAKE_CASE )}'''
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False ):
lowercase = evaluate.load('glue' , 'mrpc' )
lowercase , lowercase = get_mrpc_setup(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# First do baseline
lowercase , lowercase , lowercase = setup['no']
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
for batch in dataloader:
batch.to(__SCREAMING_SNAKE_CASE )
with torch.inference_mode():
lowercase = model(**__SCREAMING_SNAKE_CASE )
lowercase = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=__SCREAMING_SNAKE_CASE , references=batch['labels'] )
lowercase = metric.compute()
# Then do distributed
lowercase , lowercase , lowercase = setup['ddp']
model.eval()
for batch in dataloader:
with torch.inference_mode():
lowercase = model(**__SCREAMING_SNAKE_CASE )
lowercase = outputs.logits.argmax(dim=-1 )
lowercase = batch['labels']
lowercase , lowercase = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=__SCREAMING_SNAKE_CASE , references=__SCREAMING_SNAKE_CASE )
lowercase = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def UpperCAmelCase_ ( ):
lowercase = Accelerator(split_batches=__SCREAMING_SNAKE_CASE , dispatch_batches=__SCREAMING_SNAKE_CASE )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('**Testing gather_for_metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
test_mrpc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test torch metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
lowercase = Accelerator(split_batches=__SCREAMING_SNAKE_CASE , dispatch_batches=__SCREAMING_SNAKE_CASE )
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
test_torch_metrics(__SCREAMING_SNAKE_CASE , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test last batch is not dropped when perfectly divisible**' )
lowercase = Accelerator()
test_torch_metrics(__SCREAMING_SNAKE_CASE , 512 )
accelerator.state._reset_state()
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
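# Condensed sketch of the `gather_for_metrics` idea the tests above exercise: each
# process contributes its shard, and the helper drops the duplicate samples that
# padding added, so the metric sees exactly len(dataset) rows.
#
#   accelerator = Accelerator()
#   for batch in dataloader:
#       preds = model(**batch).logits.argmax(dim=-1)
#       preds, refs = accelerator.gather_for_metrics((preds, batch["labels"]))
#       metric.add_batch(predictions=preds, references=refs)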
| 84 |
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase :Union[str, Any] = logging.get_logger(__name__)
def get_mobilenet_va_config(model_name):
    '''simple docstring'''
    config = MobileNetVaConfig(layer_norm_eps=0.001)
    if "_quant" in model_name:
        raise ValueError("""Quantized models are not supported.""")
    matches = re.match(r"""^mobilenet_v1_([^_]*)_([^_]*)$""", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 10_01
    filename = """imagenet-1k-id2label.json"""
    repo_id = """huggingface/label-files"""
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r"""))
    idalabel = {int(k) + 1: v for k, v in idalabel.items()}
    idalabel[0] = """background"""
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    return config
def prepare_img():
    '''simple docstring'''
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    '''simple docstring'''
    config = get_mobilenet_va_config(model_name)
    # Load 🤗 model
    model = MobileNetVaForImageClassification(config).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model, config, checkpoint_path)
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={"""width""": config.image_size, """height""": config.image_size}, size={"""shortest_edge""": config.image_size + 32}, )
    encoding = image_processor(images=prepare_img(), return_tensors="""pt""")
    outputs = model(**encoding)
    logits = outputs.logits
    assert logits.shape == (1, 10_01)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1_739, -1.1_233, 3.1_205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9_440, -2.3_141, -0.3_333])
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1E-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model {model_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("""Pushing to the hub...""")
        repo_id = """google/""" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
lowerCamelCase :Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''mobilenet_v1_1.0_224''',
type=str,
help='''Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.''',
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCamelCase :str = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
) | 667 | 0 |
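# Example invocation (the script filename and paths are placeholders; the flags
# are the ones defined above):
#
#   python convert_mobilenet_v1_to_pytorch.py \
#       --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path /path/to/mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path /path/to/output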
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict):
    '''simple docstring'''
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'encoder.embed_positions._float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    '''simple docstring'''
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    '''simple docstring'''
    mam_aaa = torch.load(checkpoint_path, map_location='cpu')
    args = mam_aaa['args'] or mam_aaa['cfg']['model']
    state_dict = mam_aaa['model']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['encoder.embed_tokens.weight'].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size, max_position_embeddings=10_24, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='relu', )
    state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
    model = MaMaaaForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
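# A minimal sketch of the weight-tying idea behind make_linear_from_emb above:
# the output projection reuses the embedding matrix, so scoring a token is a
# dot product with that token's embedding row. The toy sizes are assumptions.
import torch
from torch import nn

emb = nn.Embedding(10, 4)           # vocab of 10, embedding dim 4
lin = nn.Linear(4, 10, bias=False)  # hidden state -> vocab logits
lin.weight.data = emb.weight.data   # share the (10, 4) matrix
hidden = torch.randn(4)
assert torch.allclose(lin(hidden), emb.weight @ hidden)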
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class _lowerCAmelCase(ProcessorMixin):
    tokenizer_class = """AutoTokenizer"""
    attributes = ["""tokenizer"""]
    preset_shape = {
'semantic_prompt': 1,
'coarse_prompt': 2,
'fine_prompt': 2,
}
    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings
@classmethod
    def from_pretrained(cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="""speaker_embeddings_path.json""", **kwargs):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path, speaker_embeddings_dict_path, subfolder=kwargs.pop("""subfolder""", None), cache_dir=kwargs.pop("""cache_dir""", None), force_download=kwargs.pop("""force_download""", False), proxies=kwargs.pop("""proxies""", None), resume_download=kwargs.pop("""resume_download""", False), local_files_only=kwargs.pop("""local_files_only""", False), use_auth_token=kwargs.pop("""use_auth_token""", None), revision=kwargs.pop("""revision""", None), )
            if speaker_embeddings_path is None:
                logger.warning(
                    F'`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exist, no preloaded speaker embeddings will be used - make sure to provide a correct path to the json dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None
        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)
        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)
def _a (self , lowercase , lowercase="speaker_embeddings_path.json" , lowercase="speaker_embeddings" , lowercase = False , **lowercase , ):
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(lowercase , lowercase , """v2""" ) , exist_ok=lowercase )
A_ : Optional[int] = {}
A_ : Tuple = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
A_ : Union[str, Any] = self._load_voice_preset(lowercase )
A_ : Tuple = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["""repo_or_path"""] , lowercase , F'{prompt_key}_{key}' ) , voice_preset[key] , allow_pickle=lowercase , )
A_ : List[str] = os.path.join(lowercase , F'{prompt_key}_{key}.npy' )
A_ : str = tmp_dict
with open(os.path.join(lowercase , lowercase ) , """w""" ) as fp:
json.dump(lowercase , lowercase )
super().save_pretrained(lowercase , lowercase , **lowercase )
    def _load_voice_preset(self, voice_preset=None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]
        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    F'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' )
            path = get_file_from_repo(
                self.speaker_embeddings.get("""repo_or_path""", """/"""), voice_preset_paths[key], subfolder=kwargs.pop("""subfolder""", None), cache_dir=kwargs.pop("""cache_dir""", None), force_download=kwargs.pop("""force_download""", False), proxies=kwargs.pop("""proxies""", None), resume_download=kwargs.pop("""resume_download""", False), local_files_only=kwargs.pop("""local_files_only""", False), use_auth_token=kwargs.pop("""use_auth_token""", None), revision=kwargs.pop("""revision""", None), )
            if path is None:
                raise ValueError(
                    F'`{os.path.join(self.speaker_embeddings.get("repo_or_path", "/"), voice_preset_paths[key])}` does not exist - make sure to provide correct paths to the {voice_preset} embeddings.' )
            voice_preset_dict[key] = np.load(path)
        return voice_preset_dict
    def _validate_voice_preset_dict(self, voice_preset=None):
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F'Voice preset unrecognized, missing {key} as a key.' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
    def __call__(self, text=None, voice_preset=None, return_tensors="""pt""", max_length=256, add_special_tokens=False, return_attention_mask=True, return_token_type_ids=False, **kwargs, ):
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)
            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(""".npz"""):
                    voice_preset = voice_preset + """.npz"""
                voice_preset = np.load(voice_preset)
        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)
        encoded_text = self.tokenizer(
            text, return_tensors=return_tensors, padding="""max_length""", max_length=max_length, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, add_special_tokens=add_special_tokens, **kwargs, )
        if voice_preset is not None:
            encoded_text["""history_prompt"""] = voice_preset
        return encoded_text
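# A minimal usage sketch for a processor like the one above, assuming it is
# exposed through AutoProcessor; "suno/bark-small" and the preset name are the
# published Bark defaults, not something defined in this file.
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("suno/bark-small")
inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
print(sorted(inputs.keys()))  # includes a "history_prompt" entry for the preset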
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    """simple docstring"""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save('image_data/lena_high_contrast.png', format='png')
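# A worked check of the contrast curve used above for level=170: midtones are
# fixed points, and the slope (~4.85) stretches everything else before Pillow
# clamps the lookup table to the 0-255 range of 8-bit images.
factor = (259 * (170 + 255)) / (255 * (259 - 170))
print(round(factor, 2))                 # ~4.85
print(int(128 + factor * (128 - 128)))  # 128: midtones unchanged
print(int(128 + factor * (200 - 128)))  # 477 -> clamped to 255 when applied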
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : Union[str, Any] = tempfile.mkdtemp()
A_ : List[Any] = BlipImageProcessor()
A_ : Optional[int] = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
A_ : Any = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
A_ : Dict = InstructBlipProcessor(lowercase , lowercase , lowercase )
processor.save_pretrained(self.tmpdirname )
def _a (self , **lowercase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).tokenizer
def _a (self , **lowercase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).image_processor
def _a (self , **lowercase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).qformer_tokenizer
def _a (self ):
shutil.rmtree(self.tmpdirname )
def _a (self ):
A_ : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
A_ : Optional[Any] = [Image.fromarray(np.moveaxis(lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _a (self ):
A_ : str = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
A_ : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A_ : Optional[Any] = self.get_image_processor(do_normalize=lowercase , padding_value=1.0 )
A_ : str = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase )
self.assertIsInstance(processor.qformer_tokenizer , lowercase )
def _a (self ):
A_ : Any = self.get_image_processor()
A_ : Union[str, Any] = self.get_tokenizer()
A_ : List[str] = self.get_qformer_tokenizer()
A_ : int = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : List[Any] = self.prepare_image_inputs()
A_ : Union[str, Any] = image_processor(lowercase , return_tensors="""np""" )
A_ : Dict = processor(images=lowercase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a (self ):
A_ : List[Any] = self.get_image_processor()
A_ : Optional[Any] = self.get_tokenizer()
A_ : Any = self.get_qformer_tokenizer()
A_ : List[str] = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : str = """lower newer"""
A_ : List[Any] = processor(text=lowercase )
A_ : Optional[int] = tokenizer(lowercase , return_token_type_ids=lowercase )
A_ : List[Any] = qformer_tokenizer(lowercase , return_token_type_ids=lowercase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] )
def _a (self ):
A_ : int = self.get_image_processor()
A_ : Union[str, Any] = self.get_tokenizer()
A_ : Union[str, Any] = self.get_qformer_tokenizer()
A_ : Any = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : Optional[int] = """lower newer"""
A_ : Optional[int] = self.prepare_image_inputs()
A_ : Tuple = processor(text=lowercase , images=lowercase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
# test if it raises when no input is passed
with pytest.raises(lowercase ):
processor()
def _a (self ):
A_ : Dict = self.get_image_processor()
A_ : str = self.get_tokenizer()
A_ : Optional[int] = self.get_qformer_tokenizer()
A_ : int = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ : Optional[int] = processor.batch_decode(lowercase )
A_ : Dict = tokenizer.batch_decode(lowercase )
self.assertListEqual(lowercase , lowercase )
def _a (self ):
A_ : Any = self.get_image_processor()
A_ : Dict = self.get_tokenizer()
A_ : Union[str, Any] = self.get_qformer_tokenizer()
A_ : Optional[int] = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : List[Any] = """lower newer"""
A_ : Optional[Any] = self.prepare_image_inputs()
A_ : Any = processor(text=lowercase , images=lowercase )
self.assertListEqual(
            list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
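# A minimal sketch of the round trip these tests exercise, using the upstream
# class spellings (GPT2Tokenizer etc.) rather than the `a`-style aliases in
# this file; the tiny checkpoints are the same ones used in setUp above.
import numpy as np
from PIL import Image
from transformers import BertTokenizerFast, BlipImageProcessor, GPT2Tokenizer, InstructBlipProcessor

image = Image.fromarray(np.random.randint(255, size=(30, 400, 3), dtype=np.uint8))
processor = InstructBlipProcessor(
    image_processor=BlipImageProcessor(),
    tokenizer=GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model"),
    qformer_tokenizer=BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert"),
)
inputs = processor(images=image, text="lower newer", return_tensors="pt")
print(sorted(inputs.keys()))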
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_lowerCamelCase : int = logging.get_logger("""transformers.models.speecht5""")
def load_weights(checkpoint, hf_model, config):
    """simple docstring"""
    hf_model.apply_weight_norm()
    hf_model.conv_pre.weight_g.data = checkpoint['''input_conv.weight_g''']
    hf_model.conv_pre.weight_v.data = checkpoint['''input_conv.weight_v''']
    hf_model.conv_pre.bias.data = checkpoint['''input_conv.bias''']
    for i in range(len(config.upsample_rates ) ):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"""upsamples.{i}.1.weight_g"""]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"""upsamples.{i}.1.weight_v"""]
        hf_model.upsampler[i].bias.data = checkpoint[f"""upsamples.{i}.1.bias"""]
    for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
        for j in range(len(config.resblock_dilation_sizes ) ):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_g"""]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_v"""]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"""blocks.{i}.convs1.{j}.1.bias"""]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_g"""]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_v"""]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"""blocks.{i}.convs2.{j}.1.bias"""]
    hf_model.conv_post.weight_g.data = checkpoint['''output_conv.1.weight_g''']
    hf_model.conv_post.weight_v.data = checkpoint['''output_conv.1.weight_v''']
    hf_model.conv_post.bias.data = checkpoint['''output_conv.1.bias''']
    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None, ):
    """simple docstring"""
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechTaHifiGanConfig()
    model = SpeechTaHifiGan(config)
    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint['''model''']['''generator'''], model, config)
    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print('''Pushing to the hub...''')
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
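# A minimal sketch of using the converted vocoder. Assumptions: the checkpoint
# id is the canonical published one, the upstream class is spelled
# SpeechT5HifiGan, and config.model_in_dim gives the expected mel-bin width.
import torch
from transformers import SpeechT5HifiGan

vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")
spectrogram = torch.randn(140, vocoder.config.model_in_dim)  # (frames, mel bins)
with torch.no_grad():
    waveform = vocoder(spectrogram)  # 1-D float tensor of audio samples
print(waveform.shape)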
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase :Optional[Any] = logging.get_logger(__name__)
lowerCamelCase :Tuple = {
'''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class _lowerCAmelCase(PretrainedConfig):
    model_type = 'mgp-str'
    def __init__(self, image_size=[32, 128], patch_size=4, num_channels=3, max_token_length=27, num_character_labels=38, num_bpe_labels=50257, num_wordpiece_labels=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, mlp_ratio=4.0, qkv_bias=True, distilled=False, layer_norm_eps=1E-5, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, output_aa_attentions=False, initializer_range=0.02, **kwargs, ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_aa_attentions = output_aa_attentions
        self.initializer_range = initializer_range
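# A minimal sketch instantiating this configuration; MgpstrConfig is the
# upstream spelling of the class defined above, and the defaults are assumed
# to reproduce alibaba-damo/mgp-str-base.
from transformers import MgpstrConfig

config = MgpstrConfig()
print(config.image_size, config.patch_size, config.max_token_length)  # [32, 128] 4 27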
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class lowercase__ ( unittest.TestCase ):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
def UpperCamelCase_ ( self) -> Tuple:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def UpperCamelCase_ ( self) -> Union[str, Any]:
_lowerCamelCase : int = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""")
_lowerCamelCase : List[Any] = unmasker("""My name is <mask>""")
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE , decimals=6) , [
{"""sequence""": """My name is grouped""", """score""": 2.1e-0_5, """token""": 3_8015, """token_str""": """ grouped"""},
{"""sequence""": """My name is accuser""", """score""": 2.1e-0_5, """token""": 2_5506, """token_str""": """ accuser"""},
] , )
_lowerCamelCase : Union[str, Any] = unmasker("""The largest city in France is <mask>""")
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE , decimals=6) , [
{
"""sequence""": """The largest city in France is grouped""",
"""score""": 2.1e-0_5,
"""token""": 3_8015,
"""token_str""": """ grouped""",
},
{
"""sequence""": """The largest city in France is accuser""",
"""score""": 2.1e-0_5,
"""token""": 2_5506,
"""token_str""": """ accuser""",
},
] , )
_lowerCamelCase : Optional[int] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3)
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE , decimals=6) , [
{"""sequence""": """My name is Clara""", """score""": 2e-0_5, """token""": 1_3606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Patrick""", """score""": 2e-0_5, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 1.9e-0_5, """token""": 2941, """token_str""": """ Te"""},
] , )
@require_torch
def UpperCamelCase_ ( self) -> Optional[Any]:
_lowerCamelCase : Any = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""")
_lowerCamelCase : Dict = unmasker("""My name is <mask>""")
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE , decimals=6) , [
{"""sequence""": """My name is Maul""", """score""": 2.2e-0_5, """token""": 3_5676, """token_str""": """ Maul"""},
{"""sequence""": """My name isELS""", """score""": 2.2e-0_5, """token""": 1_6416, """token_str""": """ELS"""},
] , )
_lowerCamelCase : Tuple = unmasker("""The largest city in France is <mask>""")
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE , decimals=6) , [
{
"""sequence""": """The largest city in France is Maul""",
"""score""": 2.2e-0_5,
"""token""": 3_5676,
"""token_str""": """ Maul""",
},
{"""sequence""": """The largest city in France isELS""", """score""": 2.2e-0_5, """token""": 1_6416, """token_str""": """ELS"""},
] , )
_lowerCamelCase : Dict = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3)
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE , decimals=6) , [
{"""sequence""": """My name is Patrick""", """score""": 2.1e-0_5, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 2e-0_5, """token""": 2941, """token_str""": """ Te"""},
{"""sequence""": """My name is Clara""", """score""": 2e-0_5, """token""": 1_3606, """token_str""": """ Clara"""},
] , )
_lowerCamelCase : Union[str, Any] = unmasker("""My name is <mask> <mask>""" , top_k=2)
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE , decimals=6) , [
[
{
"""score""": 2.2e-0_5,
"""token""": 3_5676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is Maul<mask></s>""",
},
{"""score""": 2.2e-0_5, """token""": 1_6416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""},
],
[
{
"""score""": 2.2e-0_5,
"""token""": 3_5676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is<mask> Maul</s>""",
},
{"""score""": 2.2e-0_5, """token""": 1_6416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""},
],
] , )
@require_torch_gpu
def UpperCamelCase_ ( self) -> str:
_lowerCamelCase : Optional[int] = pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""")
# convert model to fp16
pipe.model.half()
_lowerCamelCase : List[str] = pipe("""Paris is the [MASK] of France.""")
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
@slow
@require_torch
def UpperCamelCase_ ( self) -> Optional[int]:
_lowerCamelCase : Any = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""")
self.run_large_test(SCREAMING_SNAKE_CASE)
@slow
@require_tf
def UpperCamelCase_ ( self) -> Dict:
_lowerCamelCase : Tuple = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""")
self.run_large_test(SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> Tuple:
_lowerCamelCase : Optional[int] = unmasker("""My name is <mask>""")
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE) , [
{"""sequence""": """My name is John""", """score""": 0.0_08, """token""": 610, """token_str""": """ John"""},
{"""sequence""": """My name is Chris""", """score""": 0.0_07, """token""": 1573, """token_str""": """ Chris"""},
] , )
_lowerCamelCase : Tuple = unmasker("""The largest city in France is <mask>""")
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE) , [
{
"""sequence""": """The largest city in France is Paris""",
"""score""": 0.2_51,
"""token""": 2201,
"""token_str""": """ Paris""",
},
{
"""sequence""": """The largest city in France is Lyon""",
"""score""": 0.2_14,
"""token""": 1_2790,
"""token_str""": """ Lyon""",
},
] , )
_lowerCamelCase : int = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3)
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE) , [
{"""sequence""": """My name is Patrick""", """score""": 0.0_05, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Clara""", """score""": 0.0_00, """token""": 1_3606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Te""", """score""": 0.0_00, """token""": 2941, """token_str""": """ Te"""},
] , )
@require_torch
def UpperCamelCase_ ( self) -> List[str]:
_lowerCamelCase : int = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""")
_lowerCamelCase : int = None
_lowerCamelCase : int = None
self.run_pipeline_test(SCREAMING_SNAKE_CASE , [])
@require_tf
def UpperCamelCase_ ( self) -> int:
_lowerCamelCase : Union[str, Any] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""")
_lowerCamelCase : Dict = None
_lowerCamelCase : int = None
self.run_pipeline_test(SCREAMING_SNAKE_CASE , [])
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> Optional[int]:
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""")
_lowerCamelCase : List[Any] = FillMaskPipeline(model=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE)
_lowerCamelCase : List[str] = [
F'This is another {tokenizer.mask_token} test',
]
return fill_masker, examples
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> Any:
_lowerCamelCase : Dict = fill_masker.tokenizer
_lowerCamelCase : Union[str, Any] = fill_masker.model
_lowerCamelCase : int = fill_masker(
F'This is a {tokenizer.mask_token}' , )
self.assertEqual(
SCREAMING_SNAKE_CASE , [
{"""sequence""": ANY(SCREAMING_SNAKE_CASE), """score""": ANY(SCREAMING_SNAKE_CASE), """token""": ANY(SCREAMING_SNAKE_CASE), """token_str""": ANY(SCREAMING_SNAKE_CASE)},
{"""sequence""": ANY(SCREAMING_SNAKE_CASE), """score""": ANY(SCREAMING_SNAKE_CASE), """token""": ANY(SCREAMING_SNAKE_CASE), """token_str""": ANY(SCREAMING_SNAKE_CASE)},
{"""sequence""": ANY(SCREAMING_SNAKE_CASE), """score""": ANY(SCREAMING_SNAKE_CASE), """token""": ANY(SCREAMING_SNAKE_CASE), """token_str""": ANY(SCREAMING_SNAKE_CASE)},
{"""sequence""": ANY(SCREAMING_SNAKE_CASE), """score""": ANY(SCREAMING_SNAKE_CASE), """token""": ANY(SCREAMING_SNAKE_CASE), """token_str""": ANY(SCREAMING_SNAKE_CASE)},
{"""sequence""": ANY(SCREAMING_SNAKE_CASE), """score""": ANY(SCREAMING_SNAKE_CASE), """token""": ANY(SCREAMING_SNAKE_CASE), """token_str""": ANY(SCREAMING_SNAKE_CASE)},
] , )
_lowerCamelCase : Optional[int] = fill_masker([F'This is a {tokenizer.mask_token}'])
self.assertEqual(
SCREAMING_SNAKE_CASE , [
{"""sequence""": ANY(SCREAMING_SNAKE_CASE), """score""": ANY(SCREAMING_SNAKE_CASE), """token""": ANY(SCREAMING_SNAKE_CASE), """token_str""": ANY(SCREAMING_SNAKE_CASE)},
{"""sequence""": ANY(SCREAMING_SNAKE_CASE), """score""": ANY(SCREAMING_SNAKE_CASE), """token""": ANY(SCREAMING_SNAKE_CASE), """token_str""": ANY(SCREAMING_SNAKE_CASE)},
{"""sequence""": ANY(SCREAMING_SNAKE_CASE), """score""": ANY(SCREAMING_SNAKE_CASE), """token""": ANY(SCREAMING_SNAKE_CASE), """token_str""": ANY(SCREAMING_SNAKE_CASE)},
{"""sequence""": ANY(SCREAMING_SNAKE_CASE), """score""": ANY(SCREAMING_SNAKE_CASE), """token""": ANY(SCREAMING_SNAKE_CASE), """token_str""": ANY(SCREAMING_SNAKE_CASE)},
{"""sequence""": ANY(SCREAMING_SNAKE_CASE), """score""": ANY(SCREAMING_SNAKE_CASE), """token""": ANY(SCREAMING_SNAKE_CASE), """token_str""": ANY(SCREAMING_SNAKE_CASE)},
] , )
_lowerCamelCase : Optional[int] = fill_masker([F'This is a {tokenizer.mask_token}', F'Another {tokenizer.mask_token} great test.'])
self.assertEqual(
SCREAMING_SNAKE_CASE , [
[
{"""sequence""": ANY(SCREAMING_SNAKE_CASE), """score""": ANY(SCREAMING_SNAKE_CASE), """token""": ANY(SCREAMING_SNAKE_CASE), """token_str""": ANY(SCREAMING_SNAKE_CASE)},
{"""sequence""": ANY(SCREAMING_SNAKE_CASE), """score""": ANY(SCREAMING_SNAKE_CASE), """token""": ANY(SCREAMING_SNAKE_CASE), """token_str""": ANY(SCREAMING_SNAKE_CASE)},
{"""sequence""": ANY(SCREAMING_SNAKE_CASE), """score""": ANY(SCREAMING_SNAKE_CASE), """token""": ANY(SCREAMING_SNAKE_CASE), """token_str""": ANY(SCREAMING_SNAKE_CASE)},
{"""sequence""": ANY(SCREAMING_SNAKE_CASE), """score""": ANY(SCREAMING_SNAKE_CASE), """token""": ANY(SCREAMING_SNAKE_CASE), """token_str""": ANY(SCREAMING_SNAKE_CASE)},
{"""sequence""": ANY(SCREAMING_SNAKE_CASE), """score""": ANY(SCREAMING_SNAKE_CASE), """token""": ANY(SCREAMING_SNAKE_CASE), """token_str""": ANY(SCREAMING_SNAKE_CASE)},
],
[
{"""sequence""": ANY(SCREAMING_SNAKE_CASE), """score""": ANY(SCREAMING_SNAKE_CASE), """token""": ANY(SCREAMING_SNAKE_CASE), """token_str""": ANY(SCREAMING_SNAKE_CASE)},
{"""sequence""": ANY(SCREAMING_SNAKE_CASE), """score""": ANY(SCREAMING_SNAKE_CASE), """token""": ANY(SCREAMING_SNAKE_CASE), """token_str""": ANY(SCREAMING_SNAKE_CASE)},
{"""sequence""": ANY(SCREAMING_SNAKE_CASE), """score""": ANY(SCREAMING_SNAKE_CASE), """token""": ANY(SCREAMING_SNAKE_CASE), """token_str""": ANY(SCREAMING_SNAKE_CASE)},
{"""sequence""": ANY(SCREAMING_SNAKE_CASE), """score""": ANY(SCREAMING_SNAKE_CASE), """token""": ANY(SCREAMING_SNAKE_CASE), """token_str""": ANY(SCREAMING_SNAKE_CASE)},
{"""sequence""": ANY(SCREAMING_SNAKE_CASE), """score""": ANY(SCREAMING_SNAKE_CASE), """token""": ANY(SCREAMING_SNAKE_CASE), """token_str""": ANY(SCREAMING_SNAKE_CASE)},
],
] , )
with self.assertRaises(SCREAMING_SNAKE_CASE):
fill_masker([None])
# No mask_token is not supported
with self.assertRaises(SCREAMING_SNAKE_CASE):
fill_masker("""This is""")
self.run_test_top_k(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
self.run_test_targets(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
self.run_test_top_k_targets(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
self.fill_mask_with_duplicate_targets_and_top_k(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
self.fill_mask_with_multiple_masks(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> Optional[Any]:
_lowerCamelCase : List[str] = tokenizer.get_vocab()
_lowerCamelCase : Union[str, Any] = sorted(vocab.keys())[:2]
# Pipeline argument
_lowerCamelCase : int = FillMaskPipeline(model=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , targets=SCREAMING_SNAKE_CASE)
_lowerCamelCase : Optional[Any] = fill_masker(F'This is a {tokenizer.mask_token}')
self.assertEqual(
SCREAMING_SNAKE_CASE , [
{"""sequence""": ANY(SCREAMING_SNAKE_CASE), """score""": ANY(SCREAMING_SNAKE_CASE), """token""": ANY(SCREAMING_SNAKE_CASE), """token_str""": ANY(SCREAMING_SNAKE_CASE)},
{"""sequence""": ANY(SCREAMING_SNAKE_CASE), """score""": ANY(SCREAMING_SNAKE_CASE), """token""": ANY(SCREAMING_SNAKE_CASE), """token_str""": ANY(SCREAMING_SNAKE_CASE)},
] , )
_lowerCamelCase : Optional[int] = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , SCREAMING_SNAKE_CASE)
_lowerCamelCase : Optional[Any] = [tokenizer.decode([x]) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(SCREAMING_SNAKE_CASE))
# Call argument
_lowerCamelCase : int = FillMaskPipeline(model=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE)
_lowerCamelCase : Optional[Any] = fill_masker(F'This is a {tokenizer.mask_token}' , targets=SCREAMING_SNAKE_CASE)
self.assertEqual(
SCREAMING_SNAKE_CASE , [
{"""sequence""": ANY(SCREAMING_SNAKE_CASE), """score""": ANY(SCREAMING_SNAKE_CASE), """token""": ANY(SCREAMING_SNAKE_CASE), """token_str""": ANY(SCREAMING_SNAKE_CASE)},
{"""sequence""": ANY(SCREAMING_SNAKE_CASE), """score""": ANY(SCREAMING_SNAKE_CASE), """token""": ANY(SCREAMING_SNAKE_CASE), """token_str""": ANY(SCREAMING_SNAKE_CASE)},
] , )
_lowerCamelCase : Dict = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , SCREAMING_SNAKE_CASE)
_lowerCamelCase : Union[str, Any] = [tokenizer.decode([x]) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(SCREAMING_SNAKE_CASE))
# Score equivalence
_lowerCamelCase : List[str] = fill_masker(F'This is a {tokenizer.mask_token}' , targets=SCREAMING_SNAKE_CASE)
_lowerCamelCase : Dict = [top_mask["""token_str"""] for top_mask in outputs]
_lowerCamelCase : Dict = [top_mask["""score"""] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(SCREAMING_SNAKE_CASE) == set(SCREAMING_SNAKE_CASE):
_lowerCamelCase : Optional[int] = fill_masker(F'This is a {tokenizer.mask_token}' , targets=SCREAMING_SNAKE_CASE)
_lowerCamelCase : List[str] = [top_mask["""score"""] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE) , nested_simplify(SCREAMING_SNAKE_CASE))
# Raises with invalid
with self.assertRaises(SCREAMING_SNAKE_CASE):
_lowerCamelCase : str = fill_masker(F'This is a {tokenizer.mask_token}' , targets=[])
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(SCREAMING_SNAKE_CASE):
_lowerCamelCase : Union[str, Any] = fill_masker(F'This is a {tokenizer.mask_token}' , targets=[""""""])
with self.assertRaises(SCREAMING_SNAKE_CASE):
_lowerCamelCase : List[str] = fill_masker(F'This is a {tokenizer.mask_token}' , targets="""""")
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> List[Any]:
_lowerCamelCase : List[str] = FillMaskPipeline(model=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , top_k=2)
_lowerCamelCase : Any = fill_masker(F'This is a {tokenizer.mask_token}')
self.assertEqual(
SCREAMING_SNAKE_CASE , [
{"""sequence""": ANY(SCREAMING_SNAKE_CASE), """score""": ANY(SCREAMING_SNAKE_CASE), """token""": ANY(SCREAMING_SNAKE_CASE), """token_str""": ANY(SCREAMING_SNAKE_CASE)},
{"""sequence""": ANY(SCREAMING_SNAKE_CASE), """score""": ANY(SCREAMING_SNAKE_CASE), """token""": ANY(SCREAMING_SNAKE_CASE), """token_str""": ANY(SCREAMING_SNAKE_CASE)},
] , )
_lowerCamelCase : List[str] = FillMaskPipeline(model=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE)
_lowerCamelCase : int = fill_masker(F'This is a {tokenizer.mask_token}' , top_k=2)
self.assertEqual(
SCREAMING_SNAKE_CASE , [
{"""sequence""": ANY(SCREAMING_SNAKE_CASE), """score""": ANY(SCREAMING_SNAKE_CASE), """token""": ANY(SCREAMING_SNAKE_CASE), """token_str""": ANY(SCREAMING_SNAKE_CASE)},
{"""sequence""": ANY(SCREAMING_SNAKE_CASE), """score""": ANY(SCREAMING_SNAKE_CASE), """token""": ANY(SCREAMING_SNAKE_CASE), """token_str""": ANY(SCREAMING_SNAKE_CASE)},
] , )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE) , nested_simplify(SCREAMING_SNAKE_CASE))
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> str:
_lowerCamelCase : Optional[int] = tokenizer.get_vocab()
_lowerCamelCase : str = FillMaskPipeline(model=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE)
# top_k=2, ntargets=3
_lowerCamelCase : List[str] = sorted(vocab.keys())[:3]
_lowerCamelCase : Optional[int] = fill_masker(F'This is a {tokenizer.mask_token}' , top_k=2 , targets=SCREAMING_SNAKE_CASE)
# If we use the most probably targets, and filter differently, we should still
# have the same results
        _lowerCamelCase : Optional[Any] = [el["""token_str"""] for el in sorted(SCREAMING_SNAKE_CASE , key=lambda x: x["score"] , reverse=True)]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(SCREAMING_SNAKE_CASE).issubset(SCREAMING_SNAKE_CASE):
_lowerCamelCase : Any = fill_masker(F'This is a {tokenizer.mask_token}' , top_k=3 , targets=SCREAMING_SNAKE_CASE)
# They should yield exactly the same result
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE) , nested_simplify(SCREAMING_SNAKE_CASE))
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> Any:
_lowerCamelCase : Tuple = FillMaskPipeline(model=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE)
_lowerCamelCase : List[Any] = tokenizer.get_vocab()
# String duplicates + id duplicates
_lowerCamelCase : List[Any] = sorted(vocab.keys())[:3]
_lowerCamelCase : str = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_lowerCamelCase : Union[str, Any] = fill_masker(F'My name is {tokenizer.mask_token}' , targets=SCREAMING_SNAKE_CASE , top_k=10)
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(SCREAMING_SNAKE_CASE) , 3)
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> List[Any]:
_lowerCamelCase : Optional[int] = FillMaskPipeline(model=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE)
_lowerCamelCase : Optional[int] = fill_masker(
F'This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}' , top_k=2)
self.assertEqual(
SCREAMING_SNAKE_CASE , [
[
{"""sequence""": ANY(SCREAMING_SNAKE_CASE), """score""": ANY(SCREAMING_SNAKE_CASE), """token""": ANY(SCREAMING_SNAKE_CASE), """token_str""": ANY(SCREAMING_SNAKE_CASE)},
{"""sequence""": ANY(SCREAMING_SNAKE_CASE), """score""": ANY(SCREAMING_SNAKE_CASE), """token""": ANY(SCREAMING_SNAKE_CASE), """token_str""": ANY(SCREAMING_SNAKE_CASE)},
],
[
{"""sequence""": ANY(SCREAMING_SNAKE_CASE), """score""": ANY(SCREAMING_SNAKE_CASE), """token""": ANY(SCREAMING_SNAKE_CASE), """token_str""": ANY(SCREAMING_SNAKE_CASE)},
{"""sequence""": ANY(SCREAMING_SNAKE_CASE), """score""": ANY(SCREAMING_SNAKE_CASE), """token""": ANY(SCREAMING_SNAKE_CASE), """token_str""": ANY(SCREAMING_SNAKE_CASE)},
],
[
{"""sequence""": ANY(SCREAMING_SNAKE_CASE), """score""": ANY(SCREAMING_SNAKE_CASE), """token""": ANY(SCREAMING_SNAKE_CASE), """token_str""": ANY(SCREAMING_SNAKE_CASE)},
{"""sequence""": ANY(SCREAMING_SNAKE_CASE), """score""": ANY(SCREAMING_SNAKE_CASE), """token""": ANY(SCREAMING_SNAKE_CASE), """token_str""": ANY(SCREAMING_SNAKE_CASE)},
],
] , )
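# A minimal end-to-end sketch of the behaviour pinned down by these tests,
# using the same tiny checkpoint as above.
from transformers import pipeline

unmasker = pipeline("fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2)
for prediction in unmasker("My name is <mask>"):
    print(prediction["token_str"], prediction["score"])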
'''simple docstring'''
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    '''simple docstring'''
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("""float division by zero, could not find root""")
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    '''simple docstring'''
    return math.pow(x, 3) - (2 * x) - 5
if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
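# A quick numeric check: f(x) = x^3 - 2x - 5 is the classic Newton example with
# a single real root near 2.0945515, so the secant iteration above should land
# within its 1e-5 stopping tolerance of that value.
root = intersection(f, 3, 3.5)
assert abs(f(root)) < 1e-3
print(root)  # ~2.0945515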
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class TokenClassificationTask:
@staticmethod
    def read_examples_from_file(data_dir, mode) -> List[InputExample]:
"""simple docstring"""
raise NotImplementedError
@staticmethod
    def get_labels(path) -> List[str]:
"""simple docstring"""
raise NotImplementedError
@staticmethod
    def convert_examples_to_features( examples, label_list, max_seq_length, tokenizer, cls_token_at_end=False, cls_token="[CLS]", cls_token_segment_id=1, sep_token="[SEP]", sep_token_extra=False, pad_on_left=False, pad_token=0, pad_token_segment_id=0, pad_token_label_id=-1_00, sequence_a_segment_id=0, mask_padding_with_zero=True, ) -> List[InputFeatures]:
        """simple docstring"""
        label_map = {label: i for i, label in enumerate(label_list)}
        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 1_00_00 == 0:
                logger.info('Writing example %d of %d', ex_index, len(examples))
            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)
                # bert-base-multilingual-cased sometimes outputs nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)
            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length
            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length
            if ex_index < 5:
                logger.info('*** Example ***')
                logger.info('guid: %s', example.guid)
                logger.info('tokens: %s', ' '.join([str(x) for x in tokens]))
                logger.info('input_ids: %s', ' '.join([str(x) for x in input_ids]))
                logger.info('input_mask: %s', ' '.join([str(x) for x in input_mask]))
                logger.info('segment_ids: %s', ' '.join([str(x) for x in segment_ids]))
                logger.info('label_ids: %s', ' '.join([str(x) for x in label_ids]))
            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None
            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids))
        return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index

        def __init__( self, token_classification_task, data_dir, tokenizer, labels, model_type, max_seq_length = None, overwrite_cache=False, mode = Split.train, ):
            """simple docstring"""
            cached_features_file = os.path.join(
                data_dir, 'cached_{}_{}_{}'.format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)), )
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + '.lock'
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(F'''Loading features from cached file {cached_features_file}''')
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(F'''Creating features from dataset file at {data_dir}''')
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples, labels, max_seq_length, tokenizer, cls_token_at_end=bool(model_type in ['xlnet']), cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ['xlnet'] else 0, sep_token=tokenizer.sep_token, sep_token_extra=bool(model_type in ['roberta']), pad_on_left=bool(tokenizer.padding_side == 'left'), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id, )
                    logger.info(F'''Saving features into cached file {cached_features_file}''')
                    torch.save(self.features, cached_features_file)
def __len__( self) -> int:
"""simple docstring"""
return len(self.features)
def __getitem__( self, lowerCamelCase) -> InputFeatures:
"""simple docstring"""
return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -1_00

        def __init__( self, token_classification_task, data_dir, tokenizer, labels, model_type, max_seq_length = None, overwrite_cache=False, mode = Split.train, ):
            """simple docstring"""
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples, labels, max_seq_length, tokenizer, cls_token_at_end=bool(model_type in ['xlnet']), cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ['xlnet'] else 0, sep_token=tokenizer.sep_token, sep_token_extra=bool(model_type in ['roberta']), pad_on_left=bool(tokenizer.padding_side == 'left'), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id, )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen, ({'input_ids': tf.intaa, 'attention_mask': tf.intaa}, tf.intaa), (
{'input_ids': tf.TensorShape([None]), 'attention_mask': tf.TensorShape([None])},
tf.TensorShape([None]),
), )
else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen, ({'input_ids': tf.intaa, 'attention_mask': tf.intaa, 'token_type_ids': tf.intaa}, tf.intaa), (
{
'input_ids': tf.TensorShape([None]),
'attention_mask': tf.TensorShape([None]),
'token_type_ids': tf.TensorShape([None]),
},
tf.TensorShape([None]),
), )
        def get_dataset(self):
            """simple docstring"""
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset
def __len__( self) -> Any:
"""simple docstring"""
return len(self.features)
def __getitem__( self, lowerCamelCase) -> InputFeatures:
"""simple docstring"""
return self.features[i]
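# A toy illustration of the sub-token labelling scheme implemented above: only
# the first wordpiece of a word keeps its real label id, and continuations get
# pad_token_label_id (nn.CrossEntropyLoss().ignore_index, i.e. -100) so the
# loss skips them. The tokens and the label id 5 are made-up values.
word_tokens = ["jack", "##son", "##ville"]
pad_token_label_id = -100
label_ids = [5] + [pad_token_label_id] * (len(word_tokens) - 1)
print(label_ids)  # [5, -100, -100]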
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCamelCase :Tuple = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _lowerCAmelCase(BaseImageProcessor):
    model_input_names = ['pixel_values']
def __init__(self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BICUBIC , lowercase = True , lowercase = None , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = None , lowercase = None , lowercase = True , **lowercase , ):
super().__init__(**lowercase )
A_ : Dict = size if size is not None else {"""shortest_edge""": 224}
A_ : List[str] = get_size_dict(lowercase , default_to_square=lowercase )
A_ : Tuple = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
A_ : Union[str, Any] = get_size_dict(lowercase , default_to_square=lowercase , param_name="""crop_size""" )
A_ : str = do_resize
A_ : str = size
A_ : List[str] = resample
A_ : Any = do_center_crop
A_ : Union[str, Any] = crop_size
A_ : List[Any] = do_rescale
A_ : List[Any] = rescale_factor
A_ : Dict = do_normalize
A_ : Optional[int] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A_ : Any = image_std if image_std is not None else OPENAI_CLIP_STD
A_ : Union[str, Any] = do_convert_rgb
def _a (self , lowercase , lowercase , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ):
A_ : Any = get_size_dict(lowercase , default_to_square=lowercase )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A_ : Optional[Any] = get_resize_output_image_size(lowercase , size=size["""shortest_edge"""] , default_to_square=lowercase )
return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ):
A_ : Any = get_size_dict(lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(lowercase , size=(size["""height"""], size["""width"""]) , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ):
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ):
return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ):
A_ : List[str] = do_resize if do_resize is not None else self.do_resize
A_ : int = size if size is not None else self.size
A_ : Optional[int] = get_size_dict(lowercase , param_name="""size""" , default_to_square=lowercase )
A_ : int = resample if resample is not None else self.resample
A_ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
A_ : Any = crop_size if crop_size is not None else self.crop_size
A_ : Dict = get_size_dict(lowercase , param_name="""crop_size""" , default_to_square=lowercase )
A_ : str = do_rescale if do_rescale is not None else self.do_rescale
A_ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
A_ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
A_ : Any = image_mean if image_mean is not None else self.image_mean
A_ : Any = image_std if image_std is not None else self.image_std
A_ : List[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A_ : List[str] = make_list_of_images(lowercase )
if not valid_images(lowercase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A_ : int = [convert_to_rgb(lowercase ) for image in images]
# All transformations expect numpy arrays.
A_ : int = [to_numpy_array(lowercase ) for image in images]
if do_resize:
A_ : int = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images]
if do_center_crop:
A_ : Any = [self.center_crop(image=lowercase , size=lowercase ) for image in images]
if do_rescale:
A_ : List[str] = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
if do_normalize:
A_ : int = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images]
A_ : Any = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
A_ : Dict = {"""pixel_values""": images}
        return BatchFeature(data=lowercase , tensor_type=lowercase )
'''simple docstring'''
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location='''cpu''')['''model''']
    keys = list(state_dict.keys())
    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = '''first_stage_model.'''
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]
    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = '''model.diffusion_model.'''
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]
    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params
    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)
    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)
    scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps, beta_schedule='''scaled_linear''', beta_start=config.model.params.linear_start, beta_end=config.model.params.linear_end, clip_sample=False, )
    pipeline = LDMPipeline(vqvae, unet, scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
    args = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path) | 90 |
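Assuming the converter above, a direct programmatic call would look like the sketch below; every path is a placeholder, and reloading with `LDMPipeline.from_pretrained` is one way to check that the export round-trips:

# Hypothetical usage; all paths are placeholders.
convert_ldm_original(
    checkpoint_path="/path/to/ldm/model.ckpt",
    config_path="/path/to/ldm/config.yaml",
    output_path="./ldm-pipeline",
)

from diffusers import LDMPipeline
pipeline = LDMPipeline.from_pretrained("./ldm-pipeline")
image = pipeline(num_inference_steps=50).images[0]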
'''simple docstring'''
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight
def build_menu(name, value, weight):
    '''simple docstring'''
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    '''simple docstring'''
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
def a ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod() | 667 | 0 |
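A small worked example of the greedy knapsack above; the menu numbers are made up. Sorting by `Things.value_weight` picks items by value density until the weight budget is exhausted:

# Made-up menu; greedy by value/weight with a budget of 100.
food = ["Burger", "Pizza", "Coca Cola", "Rice"]
value = [80, 100, 60, 70]
weight = [40, 60, 40, 70]

menu = build_menu(food, value, weight)
taken, total_value = greedy(menu, 100.0, Things.value_weight)
print(taken, total_value)  # Burger and Pizza fit; total value 180.0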
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    sq = int(number**0.5)
    return number == sq * sq


def add_three(x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int) -> tuple[int, int]:
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(F"""{solution() = }""") | 91 |
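A quick sanity check of the helpers above: 1/2 + 1/3 + 1/6 reduces to 1/1, and is_sq distinguishes perfect squares.

# 1/2 + 1/3 + 1/6 == 1/1: top = 18 + 12 + 6 = 36, bottom = 36.
assert add_three(1, 2, 1, 3, 1, 6) == (1, 1)
assert is_sq(36) and not is_sq(35)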
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names


def add_arguments(parser):
    '''simple docstring'''
    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
    group.add_argument(
        "--recalibrate-weights",
        action="store_true",
        help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ),
    )


def set_default_quantizers(args):
    '''simple docstring'''
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)


def configure_model(model, args, calib=False, eval=False):
    '''simple docstring'''
    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)
    if args.clip_gelu:
        clip_gelu(model, args.clip_gelu)

    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model)


def enable_calibration(model):
    '''simple docstring'''
    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")


def finish_calibration(model, args):
    '''simple docstring'''
    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model)


def fuse_qkv(model, args):
    '''simple docstring'''

    def fuse3(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()

        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)


def clip_gelu(model, maxval):
    '''simple docstring'''
    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")


def expand_amax(model):
    '''simple docstring'''
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")


def recalibrate_weights(model):
    '''simple docstring'''
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod.weight_quantizer, "_amax"):
                print("RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue

            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax


def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    '''simple docstring'''
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]

    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))

    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue

        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        s = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f"{' ':{name_width}} {wgt_str}")


def print_quant_summary(model):
    '''simple docstring'''
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")


def set_quantizer(name, mod, quantizer, k, v):
    '''simple docstring'''
    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")


def set_quantizers(name, mod, which="both", **kwargs):
    '''simple docstring'''
    s = f"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name, mod, "_input_quantizer", k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, "_weight_quantizer", k, v)
    logger.info(s)


def set_quantizer_by_name(model, names, **kwargs):
    '''simple docstring'''
    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s) | 667 | 0 |
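A sketch of the calibration workflow these helpers support; `model`, `args`, and `calib_dataloader` are placeholders for objects created elsewhere, and `set_default_quantizers` has to run before the quantized model is instantiated:

# Hedged workflow sketch; model/args/calib_dataloader are hypothetical.
set_default_quantizers(args)              # before building the model
configure_model(model, args, calib=True)
enable_calibration(model)
for batch in calib_dataloader:            # forward passes collect activation ranges
    model(**batch)
finish_calibration(model, args)           # load amax values, re-enable quantization
configure_model(model, args, calib=False)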
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)

    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False

    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-large-lv60""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/s2t-small-mustc-en-fr-st""",
type=str,
help="""Path to hf decoder s2t checkpoint config""",
)
parser.add_argument("""--vocab_size""", default=10224, type=int, help="""Vocab size of decoder""")
parser.add_argument("""--num_decoder_layers""", default=7, type=int, help="""Number of decoder layers""")
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 92 |
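A direct call of the converter above, using the default encoder/decoder configs, vocab size, and layer count from the argparse block; the checkpoint, dict, and output paths are placeholders:

# Hypothetical invocation; paths are placeholders.
convert_wavaveca_checkpoint(
    "/path/to/fairseq/checkpoint.pt",     # checkpoint_path
    "./wav2vec2-s2t2",                    # pytorch_dump_folder_path
    "/path/to/dict.ltr.txt",              # dict_path
    encoder_config_path="facebook/wav2vec2-large-lv60",
    decoder_config_path="facebook/s2t-small-mustc-en-fr-st",
    vocab_size=10224,
    num_decoder_layers=7,
)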
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : List[Any] = 0
@slow
def _a (self ):
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(lowercase ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
A_ : Tuple = AutoTokenizer.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(lowercase ) , 0 )
def _a (self ):
A_ : str = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def _a (self ):
A_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def _a (self ):
A_ : int = AutoConfig.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
# Check that tokenizer_type ≠ model_type
A_ : int = AutoTokenizer.from_pretrained(lowercase , config=lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def _a (self ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase , """vocab.txt""" ) )
A_ : Optional[Any] = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""bert""" , use_fast=lowercase )
self.assertIsInstance(lowercase , lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase , """merges.txt""" ) )
A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""gpt2""" , use_fast=lowercase )
self.assertIsInstance(lowercase , lowercase )
@require_tokenizers
def _a (self ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase , """vocab.txt""" ) )
A_ : Any = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""bert""" )
self.assertIsInstance(lowercase , lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase , """merges.txt""" ) )
A_ : int = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""gpt2""" )
self.assertIsInstance(lowercase , lowercase )
def _a (self ):
with pytest.raises(lowercase ):
AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" )
@require_tokenizers
def _a (self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
A_ : str = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
if isinstance(lowercase , lowercase ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , lowercase )
else:
self.assertEqual(tokenizer.do_lower_case , lowercase )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def _a (self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
lowercase , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ):
A_ : int = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" )
def _a (self ):
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
A_ : List[str] = TOKENIZER_MAPPING.values()
A_ : Optional[Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(lowercase )
@require_tokenizers
def _a (self ):
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=lowercase ) , lowercase )
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , lowercase )
@require_tokenizers
def _a (self ):
A_ : str = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=lowercase )
A_ : List[Any] = """Hello, world. How are you?"""
A_ : List[Any] = tokenizer.tokenize(lowercase )
self.assertEqual("""[UNK]""" , tokens[0] )
A_ : Dict = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=lowercase )
A_ : List[Any] = tokenizer.tokenize(lowercase )
self.assertEqual("""[UNK]""" , tokens[0] )
@require_tokenizers
def _a (self ):
A_ : Optional[int] = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" )
self.assertEqual(type(lowercase ) , lowercase )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 30000 )
self.assertEqual(tokenizer.unk_token , """[UNK]""" )
self.assertEqual(tokenizer.padding_side , """right""" )
self.assertEqual(tokenizer.truncation_side , """right""" )
def _a (self ):
A_ : Any = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : Tuple = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def _a (self ):
A_ : Union[str, Any] = AutoTokenizer.from_pretrained("""ctrl""" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(lowercase , lowercase )
def _a (self ):
# Check we can load the tokenizer config of an online model.
A_ : Tuple = get_tokenizer_config("""bert-base-cased""" )
A_ : Any = config.pop("""_commit_hash""" , lowercase )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(lowercase , {"""do_lower_case""": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
A_ : List[Any] = get_tokenizer_config(lowercase )
self.assertDictEqual(lowercase , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
A_ : int = AutoTokenizer.from_pretrained(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : Dict = get_tokenizer_config(lowercase )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" )
def _a (self ):
try:
AutoConfig.register("""custom""" , lowercase )
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase ):
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
A_ : Tuple = CustomTokenizer.from_pretrained(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : List[str] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def _a (self ):
try:
AutoConfig.register("""custom""" , lowercase )
# Can register in two steps
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
lowercase , slow_tokenizer_class=lowercase , fast_tokenizer_class=lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase ):
AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
        # We pass through a bert tokenizer fast because there is no slow-to-fast converter for our new tokenizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
A_ : str = BertTokenizerFast.from_pretrained(lowercase )
bert_tokenizer.save_pretrained(lowercase )
A_ : Optional[Any] = CustomTokenizerFast.from_pretrained(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : List[str] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase , use_fast=lowercase )
self.assertIsInstance(lowercase , lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _a (self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase ):
A_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase ):
A_ : Any = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
A_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : int = AutoTokenizer.from_pretrained(lowercase , trust_remote_code=lowercase )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
A_ : str = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : Any = AutoTokenizer.from_pretrained(lowercase , trust_remote_code=lowercase , use_fast=lowercase )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
@require_tokenizers
def _a (self ):
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewTokenizerFast(BertTokenizerFast):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False
try:
AutoConfig.register("""custom""" , lowercase )
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
# If remote code is not set, the default is to use local
A_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
A_ : int = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
A_ : int = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
A_ : List[Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
A_ : Any = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertTrue(tokenizer.special_attribute_present )
A_ : Union[str, Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _a (self ):
A_ : Dict = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
A_ : Optional[int] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def _a (self ):
with self.assertRaisesRegex(
lowercase , """bert-base is not a local folder and is not a valid model identifier""" ):
A_ : List[str] = AutoTokenizer.from_pretrained("""bert-base""" )
def _a (self ):
with self.assertRaisesRegex(
lowercase , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
A_ : Tuple = AutoTokenizer.from_pretrained(lowercase , revision="""aaaaaa""" )
def _a (self ):
# Make sure we have cached the tokenizer.
A_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
A_ : Tuple = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 ) | 667 | 0 |
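The registration mechanism these tests exercise follows one pattern: map a config class to a tokenizer class so AutoTokenizer can resolve it. A minimal sketch using the same CustomConfig/CustomTokenizer fixtures the file imports:

# Sketch of the register pattern under test.
from transformers import AutoConfig, AutoTokenizer

AutoConfig.register("custom", CustomConfig)
AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
# AutoTokenizer.from_pretrained() now resolves checkpoints whose config is a
# CustomConfig to CustomTokenizer.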
"""simple docstring"""
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    """simple docstring"""

    feature_extraction_class = None
    feat_extract_tester = None
@property
def snake_case ( self ):
'''simple docstring'''
return self.feat_extract_tester.prepare_feat_extract_dict()
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(__UpperCAmelCase , 'feature_size' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'sampling_rate' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'padding_value' ) )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common()
lowerCAmelCase__ :Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ :Union[str, Any] = feat_extract.model_input_names[0]
lowerCAmelCase__ :List[Any] = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) for x, y in zip(__UpperCAmelCase , processed_features[input_name] ) ) )
lowerCAmelCase__ :Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
lowerCAmelCase__ :Tuple = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCAmelCase__ :Tuple = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Dict = self.feat_extract_tester.prepare_inputs_for_common(equal_length=__UpperCAmelCase )
lowerCAmelCase__ :Dict = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ :Tuple = feat_extract.model_input_names[0]
lowerCAmelCase__ :int = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
lowerCAmelCase__ :List[Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCAmelCase__ :Dict = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ :List[Any] = feat_extract.model_input_names[0]
lowerCAmelCase__ :str = BatchFeature({input_name: speech_inputs} , tensor_type='tf' )
lowerCAmelCase__ :Any = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCAmelCase__ :Union[str, Any] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def snake_case ( self , __UpperCAmelCase=False ):
'''simple docstring'''
def _inputs_have_equal_length(__UpperCAmelCase ):
lowerCAmelCase__ :int = len(input[0] )
for input_slice in input[1:]:
if len(__UpperCAmelCase ) != length:
return False
return True
def _inputs_are_equal(__UpperCAmelCase , __UpperCAmelCase ):
if len(__UpperCAmelCase ) != len(__UpperCAmelCase ):
return False
for input_slice_a, input_slice_a in zip(__UpperCAmelCase , __UpperCAmelCase ):
if not np.allclose(np.asarray(__UpperCAmelCase ) , np.asarray(__UpperCAmelCase ) , atol=1E-3 ):
return False
return True
lowerCAmelCase__ :Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ :int = self.feat_extract_tester.prepare_inputs_for_common(numpify=__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = feat_extract.model_input_names[0]
lowerCAmelCase__ :List[Any] = BatchFeature({input_name: speech_inputs} )
lowerCAmelCase__ :List[Any] = self.feat_extract_tester.seq_length_diff
lowerCAmelCase__ :List[str] = self.feat_extract_tester.max_seq_length + pad_diff
lowerCAmelCase__ :Tuple = self.feat_extract_tester.min_seq_length
lowerCAmelCase__ :Optional[Any] = self.feat_extract_tester.batch_size
lowerCAmelCase__ :List[str] = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
lowerCAmelCase__ :Tuple = feat_extract.pad(__UpperCAmelCase , padding=__UpperCAmelCase )
lowerCAmelCase__ :Tuple = input_a[input_name]
lowerCAmelCase__ :List[str] = feat_extract.pad(__UpperCAmelCase , padding='longest' )
lowerCAmelCase__ :Dict = input_a[input_name]
lowerCAmelCase__ :Optional[int] = feat_extract.pad(__UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[-1] ) )
lowerCAmelCase__ :Union[str, Any] = input_a[input_name]
lowerCAmelCase__ :Any = feat_extract.pad(__UpperCAmelCase , padding='longest' , return_tensors='np' )
lowerCAmelCase__ :Dict = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(__UpperCAmelCase ):
feat_extract.pad(__UpperCAmelCase , padding='max_length' )[input_name]
lowerCAmelCase__ :Any = feat_extract.pad(
__UpperCAmelCase , padding='max_length' , max_length=__UpperCAmelCase , return_tensors='np' )
lowerCAmelCase__ :str = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(__UpperCAmelCase ) )
self.assertTrue(_inputs_have_equal_length(__UpperCAmelCase ) )
self.assertTrue(_inputs_have_equal_length(__UpperCAmelCase ) )
self.assertTrue(_inputs_are_equal(__UpperCAmelCase , __UpperCAmelCase ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
lowerCAmelCase__ :Any = feat_extract.pad(__UpperCAmelCase , pad_to_multiple_of=1_0 )
lowerCAmelCase__ :List[str] = input_a[input_name]
lowerCAmelCase__ :str = feat_extract.pad(__UpperCAmelCase , padding='longest' , pad_to_multiple_of=1_0 )
lowerCAmelCase__ :Tuple = input_a[input_name]
lowerCAmelCase__ :List[str] = feat_extract.pad(
__UpperCAmelCase , padding='max_length' , pad_to_multiple_of=1_0 , max_length=__UpperCAmelCase )
lowerCAmelCase__ :Any = input_a[input_name]
lowerCAmelCase__ :Union[str, Any] = feat_extract.pad(
__UpperCAmelCase , padding='max_length' , pad_to_multiple_of=1_0 , max_length=__UpperCAmelCase , return_tensors='np' , )
lowerCAmelCase__ :Union[str, Any] = input_a[input_name]
self.assertTrue(all(len(__UpperCAmelCase ) % 1_0 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ :Any = pad_max_length if pad_max_length % 1_0 == 0 else (pad_max_length // 1_0 + 1) * 1_0
self.assertTrue(all(len(__UpperCAmelCase ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
lowerCAmelCase__ :List[Any] = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1E-3 )
def snake_case ( self , __UpperCAmelCase=False ):
'''simple docstring'''
def _inputs_have_equal_length(__UpperCAmelCase ):
lowerCAmelCase__ :List[str] = len(input[0] )
for input_slice in input[1:]:
if len(__UpperCAmelCase ) != length:
return False
return True
def _inputs_are_equal(__UpperCAmelCase , __UpperCAmelCase ):
if len(__UpperCAmelCase ) != len(__UpperCAmelCase ):
return False
for input_slice_a, input_slice_a in zip(__UpperCAmelCase , __UpperCAmelCase ):
if not np.allclose(np.asarray(__UpperCAmelCase ) , np.asarray(__UpperCAmelCase ) , atol=1E-3 ):
return False
return True
lowerCAmelCase__ :Any = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ :Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_common(numpify=__UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = feat_extract.model_input_names[0]
lowerCAmelCase__ :str = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
lowerCAmelCase__ :Union[str, Any] = feat_extract.pad(
__UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , truncation=__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = input_a[input_name]
lowerCAmelCase__ :Optional[Any] = feat_extract.pad(__UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[0] ) )
lowerCAmelCase__ :Any = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(__UpperCAmelCase ) )
self.assertFalse(_inputs_have_equal_length(__UpperCAmelCase ) )
# truncate to smallest with np
lowerCAmelCase__ :Any = feat_extract.pad(
__UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' , truncation=__UpperCAmelCase , )
lowerCAmelCase__ :List[Any] = input_a[input_name]
lowerCAmelCase__ :Any = feat_extract.pad(
__UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' )
lowerCAmelCase__ :Tuple = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(__UpperCAmelCase ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(__UpperCAmelCase ) )
# truncate to middle
lowerCAmelCase__ :Union[str, Any] = feat_extract.pad(
__UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=__UpperCAmelCase , return_tensors='np' , )
lowerCAmelCase__ :str = input_a[input_name]
lowerCAmelCase__ :Dict = feat_extract.pad(
__UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = input_a[input_name]
lowerCAmelCase__ :Union[str, Any] = feat_extract.pad(
__UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[1] ) , return_tensors='np' )
lowerCAmelCase__ :Union[str, Any] = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(__UpperCAmelCase ) )
self.assertTrue(_inputs_have_equal_length(__UpperCAmelCase ) )
self.assertTrue(_inputs_are_equal(__UpperCAmelCase , __UpperCAmelCase ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(__UpperCAmelCase ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(__UpperCAmelCase ):
feat_extract.pad(__UpperCAmelCase , truncation=__UpperCAmelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(__UpperCAmelCase ):
feat_extract.pad(__UpperCAmelCase , padding='longest' , truncation=__UpperCAmelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(__UpperCAmelCase ):
feat_extract.pad(__UpperCAmelCase , padding='longest' , truncation=__UpperCAmelCase )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(__UpperCAmelCase ):
feat_extract.pad(__UpperCAmelCase , padding='max_length' , truncation=__UpperCAmelCase )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
lowerCAmelCase__ :Dict = 1_2
lowerCAmelCase__ :Any = feat_extract.pad(
__UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=__UpperCAmelCase , truncation=__UpperCAmelCase , )
lowerCAmelCase__ :Dict = input_a[input_name]
lowerCAmelCase__ :Dict = feat_extract.pad(
__UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=__UpperCAmelCase , )
lowerCAmelCase__ :List[Any] = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
lowerCAmelCase__ :Optional[Any] = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
lowerCAmelCase__ :Optional[int] = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(__UpperCAmelCase ) )
self.assertFalse(_inputs_have_equal_length(__UpperCAmelCase ) )
def snake_case ( self ):
'''simple docstring'''
self._check_padding(numpify=__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
self._check_padding(numpify=__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
self._check_truncation(numpify=__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
self._check_truncation(numpify=__UpperCAmelCase )
@require_torch
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ :Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common()
lowerCAmelCase__ :Optional[int] = feat_extract.model_input_names[0]
lowerCAmelCase__ :Optional[Any] = BatchFeature({input_name: speech_inputs} )
lowerCAmelCase__ :int = feat_extract.pad(__UpperCAmelCase , padding='longest' , return_tensors='np' )[input_name]
lowerCAmelCase__ :Tuple = feat_extract.pad(__UpperCAmelCase , padding='longest' , return_tensors='pt' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
@require_tf
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ :Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common()
lowerCAmelCase__ :Union[str, Any] = feat_extract.model_input_names[0]
lowerCAmelCase__ :int = BatchFeature({input_name: speech_inputs} )
lowerCAmelCase__ :Optional[int] = feat_extract.pad(__UpperCAmelCase , padding='longest' , return_tensors='np' )[input_name]
lowerCAmelCase__ :Tuple = feat_extract.pad(__UpperCAmelCase , padding='longest' , return_tensors='tf' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = self.feat_extract_dict
lowerCAmelCase__ :Optional[int] = True
lowerCAmelCase__ :List[str] = self.feature_extraction_class(**__UpperCAmelCase )
lowerCAmelCase__ :Dict = self.feat_extract_tester.prepare_inputs_for_common()
lowerCAmelCase__ :int = [len(__UpperCAmelCase ) for x in speech_inputs]
lowerCAmelCase__ :List[Any] = feat_extract.model_input_names[0]
lowerCAmelCase__ :Optional[int] = BatchFeature({input_name: speech_inputs} )
lowerCAmelCase__ :Dict = feat_extract.pad(__UpperCAmelCase , padding='longest' , return_tensors='np' )
self.assertIn('attention_mask' , __UpperCAmelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , __UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = self.feat_extract_dict
lowerCAmelCase__ :int = True
lowerCAmelCase__ :Any = self.feature_extraction_class(**__UpperCAmelCase )
lowerCAmelCase__ :Any = self.feat_extract_tester.prepare_inputs_for_common()
lowerCAmelCase__ :List[str] = [len(__UpperCAmelCase ) for x in speech_inputs]
lowerCAmelCase__ :List[Any] = feat_extract.model_input_names[0]
lowerCAmelCase__ :int = BatchFeature({input_name: speech_inputs} )
lowerCAmelCase__ :int = min(__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = feat_extract.pad(
__UpperCAmelCase , padding='max_length' , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='np' )
self.assertIn('attention_mask' , __UpperCAmelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
| 93 |
'''simple docstring'''
from __future__ import annotations
def allocation_num(number_of_bytes, partitions):
    '''simple docstring'''
    if partitions <= 0:
        raise ValueError("""partitions must be a positive number!""")
    if partitions > number_of_bytes:
        raise ValueError("""partitions can not be greater than number_of_bytes!""")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f'{start_bytes}-{end_bytes}')
    return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod() | 667 | 0 |
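A quick usage check for the allocation helper above (illustrative values only):
# Split 100 bytes across 4 partitions; the last partition absorbs any remainder.
print(allocation_num(100, 4))  # ['1-25', '26-50', '51-75', '76-100']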
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE = {
'configuration_clipseg': [
'CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPSegConfig',
'CLIPSegTextConfig',
'CLIPSegVisionConfig',
],
'processing_clipseg': ['CLIPSegProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE['modeling_clipseg'] = [
'CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPSegModel',
'CLIPSegPreTrainedModel',
'CLIPSegTextModel',
'CLIPSegVisionModel',
'CLIPSegForImageSegmentation',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], SCREAMING_SNAKE_CASE, module_spec=__spec__)
| 94 |
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase :Any = logging.get_logger(__name__)
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm1.weight', f'encoder.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm1.bias', f'encoder.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.weight', f'encoder.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.bias', f'encoder.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm2.weight', f'encoder.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm2.bias', f'encoder.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.weight', f'encoder.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.bias', f'encoder.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc2.weight', f'encoder.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.mlp.fc2.bias', f'encoder.encoder.layer.{i}.output.dense.bias') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
A_ : Optional[int] = state_dict.pop(f'encoder.deit.blocks.{i}.attn.qkv.weight' )
A_ : Union[str, Any] = in_proj_weight[
: encoder_config.hidden_size, :
]
A_ : str = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
A_ : Union[str, Any] = in_proj_weight[
-encoder_config.hidden_size :, :
]
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : str = dct.pop(lowerCamelCase__ )
A_ : Optional[int] = val
def a ( lowerCamelCase__ ):
'''simple docstring'''
if "handwritten" in checkpoint_url:
A_ : Optional[Any] = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg""" # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
A_ : Tuple = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"""
A_ : List[str] = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw ).convert("""RGB""" )
return im
@torch.no_grad()
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Union[str, Any] = ViTConfig(image_size=3_84 , qkv_bias=lowerCamelCase__ )
A_ : int = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
A_ : List[str] = 7_68
elif "large" in checkpoint_url:
# use ViT-large encoder
A_ : Union[str, Any] = 10_24
A_ : List[Any] = 40_96
A_ : Dict = 24
A_ : List[str] = 16
A_ : Union[str, Any] = 10_24
else:
raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
A_ : Optional[Any] = False
A_ : Union[str, Any] = """relu"""
A_ : List[str] = 10_24
A_ : Tuple = True
A_ : Tuple = False
A_ : List[str] = False
# load HuggingFace model
A_ : Optional[int] = ViTModel(lowerCamelCase__ , add_pooling_layer=lowerCamelCase__ )
A_ : Dict = TrOCRForCausalLM(lowerCamelCase__ )
A_ : Dict = VisionEncoderDecoderModel(encoder=lowerCamelCase__ , decoder=lowerCamelCase__ )
model.eval()
# load state_dict of original model, rename some keys
A_ : int = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location="""cpu""" , check_hash=lowerCamelCase__ )["""model"""]
A_ : int = create_rename_keys(lowerCamelCase__ , lowerCamelCase__ )
for src, dest in rename_keys:
rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
read_in_q_k_v(lowerCamelCase__ , lowerCamelCase__ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
A_ : Union[str, Any] = state_dict.pop(lowerCamelCase__ )
if key.startswith("""decoder""" ) and "output_projection" not in key:
A_ : str = val
else:
A_ : List[str] = val
# load state dict
model.load_state_dict(lowerCamelCase__ )
# Check outputs on an image
A_ : str = ViTImageProcessor(size=encoder_config.image_size )
A_ : Union[str, Any] = RobertaTokenizer.from_pretrained("""roberta-large""" )
A_ : Tuple = TrOCRProcessor(lowerCamelCase__ , lowerCamelCase__ )
A_ : Dict = processor(images=prepare_img(lowerCamelCase__ ) , return_tensors="""pt""" ).pixel_values
# verify logits
A_ : Optional[Any] = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
A_ : Union[str, Any] = model(pixel_values=lowerCamelCase__ , decoder_input_ids=lowerCamelCase__ )
A_ : Dict = outputs.logits
A_ : str = torch.Size([1, 1, 5_02_65] )
if "trocr-base-handwritten" in checkpoint_url:
A_ : Optional[int] = torch.tensor(
[-1.4_502, -4.6_683, -0.5_347, -2.9_291, 9.1_435, -3.0_571, 8.9_764, 1.7_560, 8.7_358, -1.5_311] )
elif "trocr-large-handwritten" in checkpoint_url:
A_ : Any = torch.tensor(
[-2.6_437, -1.3_129, -2.2_596, -5.3_455, 6.3_539, 1.7_604, 5.4_991, 1.4_702, 5.6_113, 2.0_170] )
elif "trocr-base-printed" in checkpoint_url:
A_ : List[Any] = torch.tensor(
[-5.6_816, -5.8_388, 1.1_398, -6.9_034, 6.8_505, -2.4_393, 1.2_284, -1.0_232, -1.9_661, -3.9_210] )
elif "trocr-large-printed" in checkpoint_url:
A_ : Optional[Any] = torch.tensor(
[-6.0_162, -7.0_959, 4.4_155, -5.1_063, 7.0_468, -3.1_631, 2.6_466, -0.3_081, -0.8_106, -1.7_535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , lowerCamelCase__ , atol=1E-3 ), "First elements of logits not as expected"
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCamelCase__ )
print(f'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
lowerCamelCase :Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
lowerCamelCase :Optional[int] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 667 | 0 |
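The conversion above keeps the dump's placeholder names; here is a readable sketch of its fused-QKV splitting step (names assumed, shapes taken from the slicing in read_in_q_k_v above):
# Hypothetical reconstruction: a fused in-projection weight of shape
# (3 * hidden_size, hidden_size) is sliced into query, key and value weights.
import torch
def split_qkv(in_proj_weight: torch.Tensor, hidden_size: int):
    query_weight = in_proj_weight[:hidden_size, :]
    key_weight = in_proj_weight[hidden_size : hidden_size * 2, :]
    value_weight = in_proj_weight[-hidden_size:, :]
    return query_weight, key_weight, value_weight
q, k, v = split_qkv(torch.randn(3 * 768, 768), 768)  # each of shape (768, 768)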
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters( model ):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback( output_dir, metric ):
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"""seq2seq callbacks only support rouge2, bleu and em, got {metric}. You can make your own by adding to this"""
            " function." )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f"""val_{metric}""", mode="max", save_top_k=3, every_n_epochs=1, )
    return checkpoint_callback
def get_early_stopping_callback( metric, patience ):
    return EarlyStopping(
        monitor=f"""val_{metric}""", mode="min" if "loss" in metric else "max", patience=patience, verbose=True, )
class UpperCamelCase_ (pl.Callback ):
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Dict ) -> str:
UpperCAmelCase_ : Dict = {f"""lr_group_{i}""": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(lowerCAmelCase_ )
@rank_zero_only
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : pl.Trainer , lowerCAmelCase_ : pl.LightningModule , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict=True ) -> None:
logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
UpperCAmelCase_ : Optional[Any] = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
# Log results
UpperCAmelCase_ : int = Path(pl_module.hparams.output_dir )
if type_path == "test":
UpperCAmelCase_ : List[str] = od / "test_results.txt"
UpperCAmelCase_ : Any = od / "test_generations.txt"
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
UpperCAmelCase_ : Union[str, Any] = od / f"""{type_path}_results/{trainer.global_step:05d}.txt"""
UpperCAmelCase_ : List[str] = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=lowerCAmelCase_ )
generations_file.parent.mkdir(exist_ok=lowerCAmelCase_ )
with open(lowerCAmelCase_ , "a+" ) as writer:
for key in sorted(lowerCAmelCase_ ):
if key in ["log", "progress_bar", "preds"]:
continue
UpperCAmelCase_ : Any = metrics[key]
if isinstance(lowerCAmelCase_ , torch.Tensor ):
UpperCAmelCase_ : Optional[Any] = val.item()
UpperCAmelCase_ : str = f"""{key}: {val:.6f}\n"""
writer.write(lowerCAmelCase_ )
if not save_generations:
return
if "preds" in metrics:
UpperCAmelCase_ : str = "\n".join(metrics["preds"] )
generations_file.open("w+" ).write(lowerCAmelCase_ )
@rank_zero_only
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[str] ) -> Dict:
try:
UpperCAmelCase_ : Optional[Any] = pl_module.model.model.num_parameters()
except AttributeError:
UpperCAmelCase_ : Optional[Any] = pl_module.model.num_parameters()
UpperCAmelCase_ : Tuple = count_trainable_parameters(lowerCAmelCase_ )
# mp stands for million parameters
trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6} )
@rank_zero_only
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : pl.Trainer , lowerCAmelCase_ : pl.LightningModule ) -> Optional[int]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(lowerCAmelCase_ , lowerCAmelCase_ , "test" )
@rank_zero_only
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : pl.Trainer , lowerCAmelCase_ : List[Any] ) -> List[Any]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 95 |
'''simple docstring'''
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))''')) | 667 | 0 |
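A short note on the quine above: the string is a format template that contains itself; `%r` splices in the template's own repr and `%%` collapses to a literal `%`, so the printed text is exactly the source line. A minimal step-by-step check (illustrative):
template = 'print((lambda quine: quine %% quine)(%r))'
print(template % template)  # reproduces the original one-liner verbatim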
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
abca = [0, 25, 50]
abcb = [25, 50, 75]
young = fuzz.membership.trimf(X, abca)
middle_aged = fuzz.membership.trimf(X, abcb)
# Compute the different operations using inbuilt functions.
one = np.ones(75)
zero = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1 - µA(x))
complement_a = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
alg_sum = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
alg_product = young * middle_aged
# 7. Bounded Sum = min[1, (µA(x) + µB(x))]
bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = max[0, (µA(x) - µB(x))]
bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 96 |
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
lowerCamelCase :List[str] = imread(R'''digital_image_processing/image_data/lena_small.jpg''')
lowerCamelCase :Optional[int] = cvtColor(img, COLOR_BGR2GRAY)
def a ( ):
'''simple docstring'''
A_ : List[Any] = cn.convert_to_negative(lowerCamelCase__ )
# assert negative_img array for at least one True
assert negative_img.any()
def a ( ):
'''simple docstring'''
with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img:
# Work around assertion for response
assert str(cc.change_contrast(lowerCamelCase__ , 1_10 ) ).startswith(
"""<PIL.Image.Image image mode=RGB size=100x100 at""" )
def a ( ):
'''simple docstring'''
A_ : int = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def a ( ):
'''simple docstring'''
A_ : int = imread("""digital_image_processing/image_data/lena_small.jpg""" , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
A_ : List[Any] = canny.canny(lowerCamelCase__ )
# assert canny array for at least one True
assert canny_array.any()
def a ( ):
'''simple docstring'''
assert gg.gaussian_filter(lowerCamelCase__ , 5 , sigma=0.9 ).all()
def a ( ):
'''simple docstring'''
A_ : int = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
A_ : Optional[Any] = conv.img_convolve(lowerCamelCase__ , lowerCamelCase__ ).astype(lowerCamelCase__ )
assert res.any()
def a ( ):
'''simple docstring'''
assert med.median_filter(lowerCamelCase__ , 3 ).any()
def a ( ):
'''simple docstring'''
A_, A_ : int = sob.sobel_filter(lowerCamelCase__ )
assert grad.any() and theta.any()
def a ( ):
'''simple docstring'''
A_ : int = sp.make_sepia(lowerCamelCase__ , 20 )
assert sepia.all()
def a ( lowerCamelCase__ = "digital_image_processing/image_data/lena_small.jpg" ):
'''simple docstring'''
A_ : Any = bs.Burkes(imread(lowerCamelCase__ , 1 ) , 1_20 )
burkes.process()
assert burkes.output_img.any()
def a ( lowerCamelCase__ = "digital_image_processing/image_data/lena_small.jpg" , ):
'''simple docstring'''
A_ : Union[str, Any] = rs.NearestNeighbour(imread(lowerCamelCase__ , 1 ) , 4_00 , 2_00 )
nn.process()
assert nn.output.any()
def a ( ):
'''simple docstring'''
A_ : int = """digital_image_processing/image_data/lena.jpg"""
# Reading the image and converting it to grayscale.
A_ : Union[str, Any] = imread(lowerCamelCase__ , 0 )
# Test for get_neighbors_pixel function() return not None
A_ : str = 0
A_ : str = 0
A_ : Dict = image[x_coordinate][y_coordinate]
A_ : Optional[Any] = lbp.get_neighbors_pixel(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
A_ : str = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
A_ : Any = lbp.local_binary_value(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
assert lbp_image.any() | 667 | 0 |
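For intuition about the local binary pattern being tested above, a standalone toy computation under one common neighbour ordering (the module's own ordering may differ):
import numpy as np
# 3x3 patch: threshold the 8 neighbours against the centre, read them as a binary number.
patch = np.array([[6, 5, 2], [7, 6, 1], [9, 8, 7]])
center = patch[1, 1]
neighbours = [patch[0, 0], patch[0, 1], patch[0, 2], patch[1, 2],
              patch[2, 2], patch[2, 1], patch[2, 0], patch[1, 0]]
bits = [1 if n >= center else 0 for n in neighbours]
print(sum(bit << i for i, bit in enumerate(bits)))  # 241 for this patch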
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class lowercase__( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Union[str, Any] ) -> List[Any]:
lowercase_ = tempfile.mkdtemp()
lowercase_ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowercase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
lowercase_ = {
'''do_resize''': True,
'''size''': 2_0,
'''do_center_crop''': True,
'''crop_size''': 1_8,
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
lowercase_ = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE_ )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Optional[Any] , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Any:
return BertTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Optional[Any] , **SCREAMING_SNAKE_CASE_ : Tuple ) -> Tuple:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : List[str] , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Optional[int]:
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Dict ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def _lowercase ( self : Dict ) -> Any:
lowercase_ = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
lowercase_ = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _lowercase ( self : Optional[Any] ) -> Union[str, Any]:
lowercase_ = self.get_tokenizer()
lowercase_ = self.get_rust_tokenizer()
lowercase_ = self.get_image_processor()
lowercase_ = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
processor_slow.save_pretrained(self.tmpdirname )
lowercase_ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE_ )
lowercase_ = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
processor_fast.save_pretrained(self.tmpdirname )
lowercase_ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : List[Any] ) -> int:
lowercase_ = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
lowercase_ = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0 )
lowercase_ = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : List[Any] ) -> Union[str, Any]:
lowercase_ = self.get_image_processor()
lowercase_ = self.get_tokenizer()
lowercase_ = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
lowercase_ = self.prepare_image_inputs()
lowercase_ = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors='''np''' )
lowercase_ = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _lowercase ( self : Optional[Any] ) -> int:
lowercase_ = self.get_image_processor()
lowercase_ = self.get_tokenizer()
lowercase_ = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
lowercase_ = '''lower newer'''
lowercase_ = processor(text=SCREAMING_SNAKE_CASE_ )
lowercase_ = tokenizer(SCREAMING_SNAKE_CASE_ , padding='''max_length''' , max_length=6_4 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _lowercase ( self : Dict ) -> str:
lowercase_ = self.get_image_processor()
lowercase_ = self.get_tokenizer()
lowercase_ = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
lowercase_ = '''lower newer'''
lowercase_ = self.prepare_image_inputs()
lowercase_ = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
processor()
def _lowercase ( self : int ) -> List[str]:
lowercase_ = self.get_image_processor()
lowercase_ = self.get_tokenizer()
lowercase_ = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
lowercase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase_ = processor.batch_decode(SCREAMING_SNAKE_CASE_ )
lowercase_ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : str ) -> Any:
lowercase_ = self.get_image_processor()
lowercase_ = self.get_tokenizer()
lowercase_ = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
lowercase_ = '''lower newer'''
lowercase_ = self.prepare_image_inputs()
lowercase_ = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 97 |
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
lowerCamelCase :Dict = get_logger(__name__)
class _PatchedModuleObj:
    def __init__(self, module, attrs=None ):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("""__""" ):
                    setattr(self, key, getattr(module, key ) )
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj ) else module
class patch_submodule:
    _active_patches = []
    def __init__(self, obj, target, new, attrs=None ):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(""".""" )[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__(self ):
        *submodules, target_attr = self.target.split(""".""" )
        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules ) ):
            try:
                submodule = import_module(""".""".join(submodules[: i + 1] ) )
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr )
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj ) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs ) )
                    patched = getattr(self.obj, attr )
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None ), attrs=self.attrs ) )
                        patched = getattr(patched, key )
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new )
        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(""".""".join(submodules ) ), target_attr )
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr ) is attr_value:
                    self.original[attr] = getattr(self.obj, attr )
                    setattr(self.obj, attr, self.new )
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["""__builtins__"""][target_attr]
            setattr(self.obj, target_attr, self.new )
        else:
            raise RuntimeError(F'Tried to patch attribute {target_attr} instead of a submodule.' )
    def __exit__(self, *exc_info ):
        for attr in list(self.original ):
            setattr(self.obj, attr, self.original.pop(attr ) )
    def start(self ):
        self.__enter__()
        self._active_patches.append(self )
    def stop(self ):
        try:
            self._active_patches.remove(self )
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__() | 667 | 0 |
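A hypothetical usage sketch of the patcher above (assuming the reconstruction is faithful to datasets' patch_submodule):
import os
class _Mod:  # stand-in for a module whose globals we want to patch
    pass
mod = _Mod()
mod.os = os
def fake_join(*parts):
    return "/".join(parts)
with patch_submodule(mod, "os.path.join", fake_join):
    assert mod.os.path.join("a", "b") == "a/b"  # patched view only
assert mod.os is os  # original binding restored on exit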
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def a__ ( lowercase : Dict=None ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = argparse.ArgumentParser(add_help=lowercase, allow_abbrev=lowercase )
# The main config parser
_UpperCamelCase = config_command_parser(lowercase )
# The subparser to add commands to
_UpperCamelCase = config_parser.add_subparsers(title='''subcommands''', dest='''subcommand''' )
# Then add other parsers with the parent parser
default_command_parser(lowercase, parents=[parent_parser] )
update_command_parser(lowercase, parents=[parent_parser] )
return config_parser
def a__ ( ) -> str:
"""simple docstring"""
_UpperCamelCase = get_config_parser()
_UpperCamelCase = config_parser.parse_args()
if not hasattr(lowercase, '''func''' ):
config_parser.print_help()
exit(1 )
# Run
args.func(lowercase )
if __name__ == "__main__":
main()
| 98 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCamelCase :int = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase['''modeling_wav2vec2'''] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase['''modeling_tf_wav2vec2'''] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase['''modeling_flax_wav2vec2'''] = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], lowerCamelCase, module_spec=__spec__) | 667 | 0 |
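The lazy __init__ above maps submodule names to exported symbols so nothing is imported until first use; a standalone sketch of the same idea via PEP 562 module __getattr__ (illustrative, not the real _LazyModule):
import importlib
_import_structure = {"math": ["sqrt"], "json": ["dumps"]}  # toy mapping
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}
def __getattr__(name):  # resolved only on first attribute access
    module_name = _attr_to_module.get(name)
    if module_name is None:
        raise AttributeError(name)
    return getattr(importlib.import_module(module_name), name)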
def longest_common_substring(text1, text2):
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("""longest_common_substring() takes two strings for inputs""")
    text1_length = len(text1)
    text2_length = len(text2)
    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0
    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]
    return text1[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 99 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , lowercase , lowercase , lowercase , lowercase = None , ):
super().__init__()
self.register_modules(transformer=lowercase , vae=lowercase , scheduler=lowercase )
# create a imagenet -> id dictionary for easier use
A_ : str = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(""",""" ):
A_ : Optional[Any] = int(lowercase )
A_ : List[Any] = dict(sorted(self.labels.items() ) )
def _a (self , lowercase ):
if not isinstance(lowercase , lowercase ):
A_ : Optional[int] = list(lowercase )
for l in label:
if l not in self.labels:
raise ValueError(
F'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.' )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__(self , lowercase , lowercase = 4.0 , lowercase = None , lowercase = 50 , lowercase = "pil" , lowercase = True , ):
A_ : Tuple = len(lowercase )
A_ : Optional[Any] = self.transformer.config.sample_size
A_ : int = self.transformer.config.in_channels
A_ : Optional[int] = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowercase , device=self.device , dtype=self.transformer.dtype , )
A_ : Optional[Any] = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
A_ : Optional[int] = torch.tensor(lowercase , device=self.device ).reshape(-1 )
A_ : Optional[int] = torch.tensor([1000] * batch_size , device=self.device )
A_ : Optional[Any] = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(lowercase )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
A_ : List[Any] = latent_model_input[: len(lowercase ) // 2]
A_ : List[str] = torch.cat([half, half] , dim=0 )
A_ : Any = self.scheduler.scale_model_input(lowercase , lowercase )
A_ : Tuple = t
if not torch.is_tensor(lowercase ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
A_ : Optional[Any] = latent_model_input.device.type == """mps"""
if isinstance(lowercase , lowercase ):
A_ : Optional[Any] = torch.floataa if is_mps else torch.floataa
else:
A_ : List[Any] = torch.intaa if is_mps else torch.intaa
A_ : List[Any] = torch.tensor([timesteps] , dtype=lowercase , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
A_ : List[Any] = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
A_ : int = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
A_ : List[Any] = self.transformer(
lowercase , timestep=lowercase , class_labels=lowercase ).sample
# perform guidance
if guidance_scale > 1:
A_, A_ : Any = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
A_, A_ : List[Any] = torch.split(lowercase , len(lowercase ) // 2 , dim=0 )
A_ : Optional[Any] = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
A_ : str = torch.cat([half_eps, half_eps] , dim=0 )
A_ : Optional[int] = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
A_, A_ : int = torch.split(lowercase , lowercase , dim=1 )
else:
A_ : Optional[int] = noise_pred
# compute previous image: x_t -> x_t-1
A_ : Union[str, Any] = self.scheduler.step(lowercase , lowercase , lowercase ).prev_sample
if guidance_scale > 1:
A_, A_ : int = latent_model_input.chunk(2 , dim=0 )
else:
A_ : Union[str, Any] = latent_model_input
A_ : Union[str, Any] = 1 / self.vae.config.scaling_factor * latents
A_ : List[Any] = self.vae.decode(lowercase ).sample
A_ : List[str] = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A_ : Union[str, Any] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
A_ : int = self.numpy_to_pil(lowercase )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=lowercase ) | 667 | 0 |
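A minimal numeric sketch of the classifier-free-guidance step in the pipeline above (batch layout and shapes assumed):
import torch
noise_pred = torch.randn(4, 8)  # conditional half stacked over unconditional half
cond_eps, uncond_eps = torch.split(noise_pred, len(noise_pred) // 2, dim=0)
guidance_scale = 4.0
half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
eps = torch.cat([half_eps, half_eps], dim=0)  # fed back for both halves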