| code (string, 82–54.1k chars) | code_codestyle (int64, 0–699) | style_context (string, 111–35.6k chars) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_poolformer""": [
"""POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""PoolFormerConfig""",
"""PoolFormerOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Any = ["""PoolFormerFeatureExtractor"""]
_A : Dict = ["""PoolFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_poolformer"""] = [
"""POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PoolFormerForImageClassification""",
"""PoolFormerModel""",
"""PoolFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 100 |
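The row above shows the lazy-import `__init__.py` pattern used across transformers: an import-structure dict maps submodules to exported names, and `_LazyModule` defers the actual imports until attribute access. A minimal standard-library sketch of the same idea (the class below is my own illustration, not the transformers implementation):

```python
import importlib
import types


class LazyModule(types.ModuleType):
    """Minimal sketch: resolve attributes to submodule objects on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported object name to the submodule that defines it
        self._object_to_module = {
            obj: mod for mod, objs in import_structure.items() for obj in objs
        }

    def __getattr__(self, attr):
        # only called on a miss, so cached attributes skip this path entirely
        if attr in self._object_to_module:
            module = importlib.import_module(
                f"{self.__name__}.{self._object_to_module[attr]}"
            )
            value = getattr(module, attr)
            setattr(self, attr, value)  # cache for subsequent lookups
            return value
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
```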
'''simple docstring'''
import math
BALLS_PER_COLOUR = 1_0
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
def solution(num_picked: int = 20 ):
    '''simple docstring'''
    total = math.comb(NUM_BALLS , num_picked )
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR , num_picked )
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f'{result:.9f}'
if __name__ == "__main__":
print(solution(2_0)) | 667 | 0 |
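The `style_context` cell above is Project Euler 493: by linearity of expectation, the expected number of distinct colours among 20 balls drawn from 70 (7 colours × 10 balls each) is 7 · (1 − C(60,20)/C(70,20)). A Monte Carlo sanity check of that closed form (the trial count is my own choice):

```python
import random


def simulate(num_trials: int = 100_000) -> float:
    balls = [colour for colour in range(7) for _ in range(10)]  # 70 balls, 7 colours
    total = 0
    for _ in range(num_trials):
        total += len(set(random.sample(balls, 20)))  # distinct colours in one draw
    return total / num_trials


# should land near 6.818741802, the exact value printed by solution(20)
print(simulate())
```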
import os
def solution():
    script_dir = os.path.dirname(os.path.realpath(__file__ ) )
    triangle_filepath = os.path.join(script_dir, 'triangle.txt' )
    with open(triangle_filepath ) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(' ' ):
            numbers_from_line.append(int(number ) )
        a.append(numbers_from_line )
    for i in range(1, len(a ) ):
        for j in range(len(a[i] ) ):
            number1 = a[i - 1][j] if j != len(a[i - 1] ) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2 )
    return max(a[-1] )
if __name__ == "__main__":
print(solution())
| 101 |
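This row is the classic triangle maximum-path-sum DP (Project Euler 18/67): each entry absorbs the best of its two parents, row by row, in O(n²). A self-contained check on Project Euler 18's small example triangle (hard-coded here instead of `triangle.txt`):

```python
def max_path_sum(triangle: list[list[int]]) -> int:
    rows = [row[:] for row in triangle]  # work on a copy, don't mutate the input
    for i in range(1, len(rows)):
        for j in range(len(rows[i])):
            left = rows[i - 1][j - 1] if j > 0 else 0
            right = rows[i - 1][j] if j < len(rows[i - 1]) else 0
            rows[i][j] += max(left, right)
    return max(rows[-1])


# Project Euler 18's worked example: best path is 3 -> 7 -> 4 -> 9 = 23
assert max_path_sum([[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]) == 23
```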
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase :List[Any] = logging.get_logger(__name__)
lowerCamelCase :Union[str, Any] = {
'''google/pix2struct-textcaps-base''': (
'''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'''
),
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'pix2struct_text_model'
__SCREAMING_SNAKE_CASE : Optional[int] = ['past_key_values']
__SCREAMING_SNAKE_CASE : List[Any] = {
'hidden_size': 'hidden_size',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__(self , lowercase=50244 , lowercase=768 , lowercase=64 , lowercase=2048 , lowercase=12 , lowercase=12 , lowercase=32 , lowercase=128 , lowercase=0.1 , lowercase=1E-6 , lowercase=1.0 , lowercase="gelu_new" , lowercase=0 , lowercase=False , lowercase=0 , lowercase=1 , lowercase=False , lowercase=True , **lowercase , ):
A_ : Tuple = vocab_size
A_ : str = hidden_size
A_ : Optional[Any] = d_kv
A_ : Tuple = d_ff
A_ : str = num_layers
A_ : int = num_heads
A_ : Dict = relative_attention_num_buckets
A_ : Optional[Any] = relative_attention_max_distance
A_ : Dict = dropout_rate
A_ : Optional[int] = layer_norm_epsilon
A_ : Dict = initializer_factor
A_ : Any = use_cache
A_ : int = eos_token_id
A_ : Tuple = decoder_start_token_id
# for backwards compatibility
A_ : str = dense_act_fn
super().__init__(
pad_token_id=lowercase , eos_token_id=lowercase , decoder_start_token_id=lowercase , tie_word_embeddings=lowercase , is_decoder=lowercase , **lowercase , )
@classmethod
def _a (cls , lowercase , **lowercase ):
cls._set_token_in_kwargs(lowercase )
A_, A_ : List[str] = cls.get_config_dict(lowercase , **lowercase )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
A_ : int = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(lowercase , **lowercase )
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : int = 'pix2struct_vision_model'
def __init__(self , lowercase=768 , lowercase=768 , lowercase=2048 , lowercase=64 , lowercase=12 , lowercase=12 , lowercase="gelu_new" , lowercase=1E-6 , lowercase=0.0 , lowercase=0.0 , lowercase=1E-10 , lowercase=1.0 , lowercase=4096 , lowercase=32 , lowercase=128 , **lowercase , ):
super().__init__(**lowercase )
A_ : List[str] = hidden_size
A_ : Optional[int] = patch_embed_hidden_size
A_ : Any = d_ff
A_ : str = dropout_rate
A_ : Dict = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : List[Any] = initializer_range
A_ : List[str] = initializer_factor
A_ : Dict = attention_dropout
A_ : Optional[Any] = layer_norm_eps
A_ : Optional[Any] = dense_act_fn
A_ : List[Any] = seq_len
A_ : Tuple = relative_attention_num_buckets
A_ : Any = relative_attention_max_distance
A_ : int = d_kv
@classmethod
def _a (cls , lowercase , **lowercase ):
cls._set_token_in_kwargs(lowercase )
A_, A_ : List[Any] = cls.get_config_dict(lowercase , **lowercase )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
A_ : Tuple = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(lowercase , **lowercase )
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Any = 'pix2struct'
__SCREAMING_SNAKE_CASE : List[Any] = True
def __init__(self , lowercase=None , lowercase=None , lowercase=1.0 , lowercase=0.02 , lowercase=False , lowercase=False , lowercase=True , **lowercase , ):
super().__init__(tie_word_embeddings=lowercase , is_encoder_decoder=lowercase , **lowercase )
if text_config is None:
A_ : Optional[Any] = {}
logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""" )
if vision_config is None:
A_ : Tuple = {}
logger.info("""vision_config is None. Initializing the Pix2StructVisionConfig with default values.""" )
A_ : Tuple = PixaStructTextConfig(**lowercase )
A_ : List[str] = PixaStructVisionConfig(**lowercase )
A_ : Dict = self.text_config.decoder_start_token_id
A_ : Union[str, Any] = self.text_config.pad_token_id
A_ : str = self.text_config.eos_token_id
A_ : List[str] = initializer_factor
A_ : int = initializer_range
A_ : Tuple = self.initializer_range
A_ : Tuple = self.initializer_range
A_ : List[str] = is_vqa
@classmethod
def _a (cls , lowercase , lowercase , **lowercase ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase )
def _a (self ):
A_ : Optional[Any] = copy.deepcopy(self.__dict__ )
A_ : str = self.text_config.to_dict()
A_ : List[Any] = self.vision_config.to_dict()
A_ : List[str] = self.__class__.model_type
return output | 667 | 0 |
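All three classes in the row above are obfuscated to the same name (`_lowerCAmelCase`); in the original they are `Pix2StructTextConfig`, `Pix2StructVisionConfig`, and `Pix2StructConfig`, where the last composes the first two and mirrors shared token ids onto itself. A stripped-down sketch of that composition pattern (class and field names here are illustrative, not the transformers API):

```python
import copy


class TextConfig:
    def __init__(self, vocab_size=50244, pad_token_id=0, **kwargs):
        self.vocab_size = vocab_size
        self.pad_token_id = pad_token_id

    def to_dict(self):
        return copy.deepcopy(self.__dict__)


class CompositeConfig:
    def __init__(self, text_config=None, **kwargs):
        # default-construct the sub-config when none is given (as the row logs)
        self.text_config = TextConfig(**(text_config or {}))
        # mirror shared fields so downstream code can read them off the top level
        self.pad_token_id = self.text_config.pad_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()  # serialize nested config
        return output
```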
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__magic_name__ : Any = logging.get_logger(__name__)
__magic_name__ : int = {
"""facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""",
}
class lowercase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase : str = """convnextv2"""
    def __init__( self , num_channels=3 , patch_size=4 , num_stages=4 , hidden_sizes=None , depths=None , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1e-1_2 , drop_path_rate=0.0 , image_size=2_2_4 , out_features=None , out_indices=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [9_6, 1_9_2, 3_8_4, 7_6_8] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
| 102 |
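The ConvNeXt V2 config builds `stage_names` as `["stem", "stage1", ..., "stageN"]` and then aligns `out_features`/`out_indices` against it so a backbone can be tapped at named stages. The sketch below mirrors my understanding of what `get_aligned_output_features_output_indices` does; treat it as an approximation rather than the exact transformers behaviour:

```python
def align_output_features_indices(out_features, out_indices, stage_names):
    # default to the last stage when neither selector is given
    if out_features is None and out_indices is None:
        return [stage_names[-1]], [len(stage_names) - 1]
    if out_features is None:
        return [stage_names[i] for i in out_indices], list(out_indices)
    if out_indices is None:
        return list(out_features), [stage_names.index(f) for f in out_features]
    return list(out_features), list(out_indices)


stages = ["stem", "stage1", "stage2", "stage3", "stage4"]
print(align_output_features_indices(None, None, stages))    # (['stage4'], [4])
print(align_output_features_indices(None, [1, 3], stages))  # (['stage1', 'stage3'], [1, 3])
```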
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
'''configuration_audio_spectrogram_transformer''': [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ASTConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_audio_spectrogram_transformer'''] = [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ASTForAudioClassification''',
'''ASTModel''',
'''ASTPreTrainedModel''',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_audio_spectrogram_transformer'''] = ['''ASTFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 667 | 0 |
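The AST row repeats the same gating idiom: availability probes raise `OptionalDependencyNotAvailable`, and the `else:` branch registers the gated objects. The probe itself boils down to a `find_spec` check; a hedged sketch (transformers additionally caches the result and can check minimum versions):

```python
import importlib.util


class OptionalDependencyNotAvailable(BaseException):
    """Internal control-flow signal, matching the transformers pattern."""


def is_torch_available() -> bool:
    return importlib.util.find_spec("torch") is not None


try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    TORCH_OBJECTS = []             # torch missing: export nothing
else:
    TORCH_OBJECTS = ["ASTModel"]   # torch present: expose torch-backed classes
```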
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''deit.embeddings.cls_token'''),
('''dist_token''', '''deit.embeddings.distillation_token'''),
('''patch_embed.proj.weight''', '''deit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''deit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''deit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''deit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('''norm.weight''', '''deit.layernorm.weight'''),
('''norm.bias''', '''deit.layernorm.bias'''),
('''head.weight''', '''cls_classifier.weight'''),
('''head.bias''', '''cls_classifier.bias'''),
('''head_dist.weight''', '''distillation_classifier.weight'''),
('''head_dist.bias''', '''distillation_classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''''''
        else:
            prefix = '''deit.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = '''huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4] )
    config.image_size = int(deit_name[-3:] )
    # size of the architecture
    if deit_name[9:].startswith('''tiny''' ):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith('''small''' ):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith('''base''' ):
        pass
    elif deit_name[4:].startswith('''large''' ):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size , crop_size=config.image_size )
    encoding = image_processor(images=prepare_img() , return_tensors='''pt''' )
    pixel_values = encoding['''pixel_values''']
    outputs = model(pixel_values )
    timm_logits = timm_model(pixel_values )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits , outputs.logits , atol=1E-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"""Saving model {deit_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--deit_name''',
default='''vit_deit_base_distilled_patch16_224''',
type=str,
help='''Name of the DeiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 103 |
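The heart of the DeiT conversion is `read_in_q_k_v`: timm stores attention as one fused `qkv` projection of shape `(3 * hidden, hidden)`, which HuggingFace splits into separate query/key/value tensors. The slicing in isolation:

```python
import torch

hidden_size = 8
# timm keeps a single fused projection of shape (3 * hidden, hidden)
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)

query = in_proj_weight[:hidden_size, :]
key = in_proj_weight[hidden_size : 2 * hidden_size, :]
value = in_proj_weight[-hidden_size:, :]

# stacking the slices back recovers the fused matrix exactly
assert torch.equal(torch.cat([query, key, value]), in_proj_weight)
```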
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : List[str] = ['image_processor', 'tokenizer']
__SCREAMING_SNAKE_CASE : Any = 'LayoutLMv3ImageProcessor'
__SCREAMING_SNAKE_CASE : Any = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__(self , lowercase=None , lowercase=None , **lowercase ):
A_ : Dict = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , lowercase , )
A_ : List[str] = kwargs.pop("""feature_extractor""" )
A_ : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(lowercase , lowercase )
def __call__(self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = True , lowercase = False , lowercase = None , lowercase = None , lowercase = 0 , lowercase = None , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = True , lowercase = None , **lowercase , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"""You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.""" )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"""You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" )
# first, apply the image processor
A_ : Optional[int] = self.image_processor(images=lowercase , return_tensors=lowercase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(lowercase , lowercase ):
A_ : Union[str, Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
A_ : Dict = features["""words"""]
A_ : Optional[int] = self.tokenizer(
text=text if text is not None else features["""words"""] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["""boxes"""] , word_labels=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_token_type_ids=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , )
# add pixel values
A_ : List[Any] = features.pop("""pixel_values""" )
if return_overflowing_tokens is True:
A_ : List[str] = self.get_overflowing_images(lowercase , encoded_inputs["""overflow_to_sample_mapping"""] )
A_ : Optional[int] = images
return encoded_inputs
def _a (self , lowercase , lowercase ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
A_ : str = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(lowercase ) != len(lowercase ):
raise ValueError(
"""Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"""
F' {len(lowercase )} and {len(lowercase )}' )
return images_with_overflow
def _a (self , *lowercase , **lowercase ):
return self.tokenizer.batch_decode(*lowercase , **lowercase )
def _a (self , *lowercase , **lowercase ):
return self.tokenizer.decode(*lowercase , **lowercase )
@property
def _a (self ):
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def _a (self ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowercase , )
return self.image_processor_class
@property
def _a (self ):
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , lowercase , )
return self.image_processor | 667 | 0 |
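`get_overflowing_images` exists because tokenizing with `return_overflowing_tokens=True` can split one document into several `input_ids` rows; each row must then carry a copy of its source image. The mapping logic on its own:

```python
def expand_images(images, overflow_to_sample_mapping):
    """One image per tokenized chunk: chunk i came from sample mapping[i]."""
    return [images[sample_idx] for sample_idx in overflow_to_sample_mapping]


# three chunks produced from two samples: sample 0 overflowed into two chunks
print(expand_images(["img_a", "img_b"], [0, 0, 1]))  # ['img_a', 'img_a', 'img_b']
```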
"""simple docstring"""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(n: int) -> int:
    """simple docstring"""
    factors = prime_factors(n )
    if is_square_free(factors ):
        return -1 if len(factors ) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 104 |
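The Möbius row relies on two helpers from a sibling `maths` package. A self-contained equivalent with minimal inline helpers (my own versions, not the imported ones), checked against the first few known values of μ(n):

```python
def prime_factors(n: int) -> list[int]:
    factors, d = [], 2
    while d * d <= n:
        while n % d == 0:
            factors.append(d)
            n //= d
        d += 1
    if n > 1:
        factors.append(n)
    return factors


def mobius(n: int) -> int:
    factors = prime_factors(n)
    if len(factors) == len(set(factors)):       # square-free
        return -1 if len(factors) % 2 else 1    # sign by parity of factor count
    return 0


assert [mobius(n) for n in range(1, 11)] == [1, -1, -1, 0, -1, 1, -1, 0, 0, 1]
```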
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
lowerCamelCase :Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(__UpperCAmelCase )
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , **lowercase ):
super().__init__(**lowercase )
requires_backends(self , """vision""" )
requires_backends(self , """torch""" )
if self.framework != "pt":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
self.check_model_type(lowercase )
def _a (self , **lowercase ):
A_ : str = {}
A_ : Dict = {}
A_ : str = {}
# preprocess args
if "points_per_batch" in kwargs:
A_ : Dict = kwargs["""points_per_batch"""]
if "points_per_crop" in kwargs:
A_ : int = kwargs["""points_per_crop"""]
if "crops_n_layers" in kwargs:
A_ : str = kwargs["""crops_n_layers"""]
if "crop_overlap_ratio" in kwargs:
A_ : int = kwargs["""crop_overlap_ratio"""]
if "crop_n_points_downscale_factor" in kwargs:
A_ : Tuple = kwargs["""crop_n_points_downscale_factor"""]
# postprocess args
if "pred_iou_thresh" in kwargs:
A_ : Any = kwargs["""pred_iou_thresh"""]
if "stability_score_offset" in kwargs:
A_ : Optional[int] = kwargs["""stability_score_offset"""]
if "mask_threshold" in kwargs:
A_ : Union[str, Any] = kwargs["""mask_threshold"""]
if "stability_score_thresh" in kwargs:
A_ : List[str] = kwargs["""stability_score_thresh"""]
if "crops_nms_thresh" in kwargs:
A_ : Union[str, Any] = kwargs["""crops_nms_thresh"""]
if "output_rle_mask" in kwargs:
A_ : List[Any] = kwargs["""output_rle_mask"""]
if "output_bboxes_mask" in kwargs:
A_ : Union[str, Any] = kwargs["""output_bboxes_mask"""]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__(self , lowercase , *lowercase , lowercase=None , lowercase=None , **lowercase ):
return super().__call__(lowercase , *lowercase , num_workers=lowercase , batch_size=lowercase , **lowercase )
def _a (self , lowercase , lowercase=64 , lowercase = 0 , lowercase = 512 / 1500 , lowercase = 32 , lowercase = 1 , ):
A_ : Tuple = load_image(lowercase )
A_ : int = self.image_processor.size["""longest_edge"""]
A_, A_, A_, A_ : str = self.image_processor.generate_crop_boxes(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
A_ : Dict = self.image_processor(images=lowercase , return_tensors="""pt""" )
with self.device_placement():
if self.framework == "pt":
A_ : Optional[Any] = self.get_inference_context()
with inference_context():
A_ : str = self._ensure_tensor_on_device(lowercase , device=self.device )
A_ : Tuple = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
A_ : Tuple = image_embeddings
A_ : Dict = grid_points.shape[1]
A_ : Optional[Any] = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"""Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
"""To return all points at once, set points_per_batch to None""" )
for i in range(0 , lowercase , lowercase ):
A_ : Tuple = grid_points[:, i : i + points_per_batch, :, :]
A_ : List[Any] = input_labels[:, i : i + points_per_batch]
A_ : Optional[Any] = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def _a (self , lowercase , lowercase=0.88 , lowercase=0.95 , lowercase=0 , lowercase=1 , ):
A_ : Any = model_inputs.pop("""input_boxes""" )
A_ : str = model_inputs.pop("""is_last""" )
A_ : int = model_inputs.pop("""original_sizes""" ).tolist()
A_ : int = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
A_ : List[str] = self.model(**lowercase )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
A_ : Optional[int] = model_outputs["""pred_masks"""]
A_ : Tuple = self.image_processor.post_process_masks(
lowercase , lowercase , lowercase , lowercase , binarize=lowercase )
A_ : Union[str, Any] = model_outputs["""iou_scores"""]
A_, A_, A_ : Tuple = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , lowercase , lowercase , lowercase , lowercase , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def _a (self , lowercase , lowercase=False , lowercase=False , lowercase=0.7 , ):
A_ : Tuple = []
A_ : Optional[Any] = []
A_ : str = []
for model_output in model_outputs:
all_scores.append(model_output.pop("""iou_scores""" ) )
all_masks.extend(model_output.pop("""masks""" ) )
all_boxes.append(model_output.pop("""boxes""" ) )
A_ : Any = torch.cat(lowercase )
A_ : List[Any] = torch.cat(lowercase )
A_, A_, A_, A_ : Optional[int] = self.image_processor.post_process_for_mask_generation(
lowercase , lowercase , lowercase , lowercase )
A_ : int = defaultdict(lowercase )
for output in model_outputs:
for k, v in output.items():
extra[k].append(lowercase )
A_ : Optional[int] = {}
if output_rle_mask:
A_ : List[str] = rle_mask
if output_bboxes_mask:
A_ : Optional[int] = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra} | 667 | 0 |
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """simple docstring"""
    x = tf.convert_to_tensor(x )
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
    return x * cdf
def _gelu_new(x):
    """simple docstring"""
    x = tf.convert_to_tensor(x )
    pi = tf.cast(math.pi , x.dtype )
    coeff = tf.cast(0.0_4_4_7_1_5 , x.dtype )
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(x , 3 )) ))
    return x * cdf
def mish(x):
    """simple docstring"""
    x = tf.convert_to_tensor(x )
    return x * tf.tanh(tf.math.softplus(x ) )
def gelu_fast(x):
    """simple docstring"""
    x = tf.convert_to_tensor(x )
    coeffa = tf.cast(0.0_4_4_7_1_5 , x.dtype )
    coeffb = tf.cast(0.7_9_7_8_8_4_5_6_0_8 , x.dtype )
    return 0.5 * x * (1.0 + tf.tanh(x * coeffb * (1.0 + coeffa * x * x) ))
def quick_gelu(x):
    """simple docstring"""
    x = tf.convert_to_tensor(x )
    coeff = tf.cast(1.7_0_2 , x.dtype )
    return x * tf.math.sigmoid(coeff * x )
def gelu_aa(x):
    """simple docstring"""
    return tf.clip_by_value(_gelu(x ) , -10 , 10 )
def glu(x, axis=-1):
    """simple docstring"""
    a , b = tf.split(x , 2 , axis=axis )
    return a * tf.math.sigmoid(b )
if version.parse(tf.version.VERSION) >= version.parse('''2.4'''):
    def approximate_gelu_wrap(x):
        """simple docstring"""
        return tf.keras.activations.gelu(x , approximate=True )
    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new
ACTaFN = {
    '''gelu''': gelu,
    '''gelu_10''': gelu_aa,
    '''gelu_fast''': gelu_fast,
    '''gelu_new''': gelu_new,
    '''glu''': glu,
    '''mish''': mish,
    '''quick_gelu''': quick_gelu,
    '''relu''': tf.keras.activations.relu,
    '''sigmoid''': tf.keras.activations.sigmoid,
    '''silu''': tf.keras.activations.swish,
    '''swish''': tf.keras.activations.swish,
    '''tanh''': tf.keras.activations.tanh,
}
def get_tf_activation(activation_string):
    """simple docstring"""
    if activation_string in ACTaFN:
        return ACTaFN[activation_string]
    else:
        raise KeyError(F'function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}' )
| 105 |
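The activations row defines the exact erf-based GELU next to its tanh approximation (`_gelu_new` / `gelu_fast`). A quick NumPy check of how close the tanh form stays to the exact one (NumPy is used here only so the check runs without TensorFlow installed):

```python
import math

import numpy as np

x = np.linspace(-5, 5, 1001)
erf = np.vectorize(math.erf)  # elementwise erf without scipy

gelu_exact = 0.5 * x * (1.0 + erf(x / math.sqrt(2.0)))
gelu_tanh = 0.5 * x * (1.0 + np.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * x**3)))

print(np.max(np.abs(gelu_exact - gelu_tanh)))  # stays below ~1e-3 across the grid
```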
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def euler_modified(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    '''simple docstring'''
    n = int(np.ceil((x_end - x0) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = y0
    x = x0
    for k in range(n ):
        y_predict = y[k] + step_size * ode_func(x , y[k] )
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y_predict ))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod() | 667 | 0 |
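A quick usage check for the corrected `euler_modified` above: it is Heun's method (forward-Euler predictor, trapezoidal corrector), so for y' = y with y(0) = 1 it should track the exact solution e^x with second-order accuracy:

```python
import math

# uses euler_modified as defined in the snippet above
ys = euler_modified(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
print(ys[-1], math.e)  # second-order method: agrees with e to roughly 1e-5
```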
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = 'src/diffusers'
# Matches is_xxx_available()
_re_backend = re.compile(r'is\_([a-z_]*)_available\(\)')
# Matches from xxx import bla
_re_single_line_import = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
DUMMY_CONSTANT = '\n{0} = None\n'
DUMMY_CLASS = '\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n'
DUMMY_FUNCTION = '\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n'
def find_backend(line):
    '''simple docstring'''
    backends = _re_backend.findall(line )
    if len(backends ) == 0:
        return None
    return "_and_".join(backends )
def read_init():
    '''simple docstring'''
    with open(os.path.join(PATH_TO_DIFFUSERS , '__init__.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines ):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index] )
        if backend is not None:
            while not lines[line_index].startswith('else:' ):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines ) and len(lines[line_index] ) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(', ' ) )
                elif line.startswith(' ' * 8 ):
                    objects.append(line[8:-2] )
                line_index += 1
            if len(objects ) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def create_dummy_object(name, backend_name):
    '''simple docstring'''
    if name.isupper():
        return DUMMY_CONSTANT.format(name )
    elif name.islower():
        return DUMMY_FUNCTION.format(name , backend_name )
    else:
        return DUMMY_CLASS.format(name , backend_name )
def create_dummy_files(backend_specific_objects=None):
    '''simple docstring'''
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        backend_name = '[' + ', '.join(F'''"{b}"''' for b in backend.split('_and_' ) ) + ']'
        dummy_file = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n'
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o , backend_name ) for o in objects] )
        dummy_files[backend] = dummy_file
    return dummy_files
def check_dummies(overwrite=False):
    '''simple docstring'''
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {'torch': 'pt'}
    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS , 'utils' )
    dummy_file_paths = {
        backend: os.path.join(path , F'''dummy_{short_names.get(backend , backend )}_objects.py''' )
        for backend in dummy_files.keys()
    }
    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path ):
            with open(file_path , 'r' , encoding='utf-8' , newline='\n' ) as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ''
    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    F'''Updating diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py as the main '''
                    '__init__ has new objects.' )
                with open(dummy_file_paths[backend] , 'w' , encoding='utf-8' , newline='\n' ) as f:
                    f.write(dummy_files[backend] )
            else:
                raise ValueError(
                    'The main __init__ has objects that are not present in '
                    F'''diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py. Run `make fix-copies` '''
                    'to fix this.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_dummies(args.fix_and_overwrite) | 106 |
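`create_dummy_object` emits placeholders built on a `DummyObject` metaclass, so importing a backend-gated class without the backend only fails when the class is actually used. A minimal sketch of such a metaclass (my own reduction: diffusers' real version routes through `requires_backends`, and `UNet2DModel` is just an example name here):

```python
class DummyObject(type):
    """Metaclass: any public attribute access on the class raises ImportError."""

    def __getattr__(cls, key):
        if key.startswith("_"):
            raise AttributeError(key)  # let private/dunder probes fail normally
        raise ImportError(f"{cls.__name__} requires the backends {cls._backends}")


class UNet2DModel(metaclass=DummyObject):
    _backends = ["torch"]


try:
    UNet2DModel.from_pretrained("some/checkpoint")  # hypothetical checkpoint id
except ImportError as exc:
    print(exc)  # UNet2DModel requires the backends ['torch']
```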
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase :Union[str, Any] = logging.get_logger(__name__)
def get_mobilenet_v1_config(model_name):
    '''simple docstring'''
    config = MobileNetV1Config(layer_norm_eps=0.001 )
    if "_quant" in model_name:
        raise ValueError("""Quantized models are not supported.""" )
    matches = re.match(r"""^mobilenet_v1_([^_]*)_([^_]*)$""" , model_name )
    if matches:
        config.depth_multiplier = float(matches[1] )
        config.image_size = int(matches[2] )
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 10_01
    filename = """imagenet-1k-id2label.json"""
    repo_id = """huggingface/label-files"""
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    idalabel = {int(k) + 1: v for k, v in idalabel.items()}
    idalabel[0] = """background"""
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    return config
def prepare_img():
    '''simple docstring'''
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    '''simple docstring'''
    config = get_mobilenet_v1_config(model_name )
    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config ).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model , config , checkpoint_path )
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"""width""": config.image_size, """height""": config.image_size} , size={"""shortest_edge""": config.image_size + 32} , )
    encoding = image_processor(images=prepare_img() , return_tensors="""pt""" )
    outputs = model(**encoding )
    logits = outputs.logits
    assert logits.shape == (1, 10_01)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1_739, -1.1_233, 3.1_205] )
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9_440, -2.3_141, -0.3_333] )
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3] , expected_logits , atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("""Pushing to the hub...""" )
        repo_id = """google/""" + model_name
        image_processor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''mobilenet_v1_1.0_224''',
type=str,
help='''Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.''',
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
) | 667 | 0 |
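The TF MobileNetV1 head has 1001 outputs because index 0 is a synthetic "background" class, so the conversion shifts every ImageNet label up by one. The remapping in isolation:

```python
imagenet_id2label = {0: "tench", 1: "goldfish"}  # truncated ImageNet mapping

id2label = {int(k) + 1: v for k, v in imagenet_id2label.items()}
id2label[0] = "background"  # TF's extra class takes the vacated index 0

print(id2label)  # {1: 'tench', 2: 'goldfish', 0: 'background'}
```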
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase_ ( _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = CLIPTokenizer
__lowerCAmelCase = CLIPTokenizerFast
__lowerCAmelCase = True
__lowerCAmelCase = {}
__lowerCAmelCase = False
def __UpperCAmelCase ( self : Dict ) -> Tuple:
super().setUp()
# fmt: off
_A = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
_A = dict(zip(UpperCamelCase__, range(len(UpperCamelCase__ ) ) ) )
_A = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
_A = {'unk_token': '<unk>'}
_A = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] )
_A = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file, 'w', encoding='utf-8' ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + '\n' )
with open(self.merges_file, 'w', encoding='utf-8' ) as fp:
fp.write('\n'.join(UpperCamelCase__ ) )
def __UpperCAmelCase ( self : List[Any], **UpperCamelCase__ : Optional[Any] ) -> int:
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname, **UpperCamelCase__ )
def __UpperCAmelCase ( self : str, **UpperCamelCase__ : List[str] ) -> Tuple:
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **UpperCamelCase__ )
def __UpperCAmelCase ( self : Dict, UpperCamelCase__ : str ) -> Tuple:
_A = 'lower newer'
_A = 'lower newer'
return input_text, output_text
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
_A = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
_A = 'lower newer'
_A = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
_A = tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__, UpperCamelCase__ )
_A = tokens + [tokenizer.unk_token]
_A = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ), UpperCamelCase__ )
@require_ftfy
def __UpperCAmelCase ( self : int ) -> Any:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_A = self.tokenizer_class.from_pretrained(UpperCamelCase__, **UpperCamelCase__ )
_A = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__, **UpperCamelCase__ )
_A = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
_A = tokenizer_s.tokenize(UpperCamelCase__ )
_A = tokenizer_r.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__, UpperCamelCase__ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
_A = 'xa\u0303y' + ' ' + 'x\xe3y'
_A = tokenizer_s.tokenize(UpperCamelCase__ )
_A = tokenizer_r.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__, UpperCamelCase__ )
# Test that the tokenization is identical on unicode of space type
_A = [
'\u0009', # (horizontal tab, '\t')
'\u000B', # (vertical tab)
'\u000C', # (form feed)
'\u0020', # (space, ' ')
'\u200E', # (left-to-right mark):w
'\u200F', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
_A = tokenizer_s.tokenize(UpperCamelCase__ )
_A = tokenizer_r.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__, UpperCamelCase__ )
# Test that the tokenization is identical on unicode of line break type
_A = [
'\u000A', # (line feed, '\n')
'\r\n', # (carriage return and line feed, '\r\n')
'\u000D', # (carriage return, '\r')
'\r', # (carriage return, '\r')
'\u000D', # (carriage return, '\r')
'\u2028', # (line separator)
'\u2029', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
_A = tokenizer_s.tokenize(UpperCamelCase__ )
_A = tokenizer_r.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__, UpperCamelCase__ )
def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_A = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
_A = f'{text_of_1_token} {text_of_1_token}'
_A = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__, use_fast=UpperCamelCase__, )
_A = tokenizer_r(UpperCamelCase__, return_offsets_mapping=UpperCamelCase__, add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0], (0, len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1], (len(UpperCamelCase__ ) + 1, len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )), )
_A = f' {text}'
_A = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__, use_fast=UpperCamelCase__, )
_A = tokenizer_r(UpperCamelCase__, return_offsets_mapping=UpperCamelCase__, add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(UpperCamelCase__ ) + 1, 1 + len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )), )
def __UpperCAmelCase ( self : Optional[Any] ) -> Dict:
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(UpperCamelCase__ ) as context:
self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
self.assertTrue(
context.exception.args[0].startswith(
'The `backend_tokenizer` provided does not match the expected format.' ) )
@require_ftfy
def __UpperCAmelCase ( self : Any ) -> List[Any]:
super().test_tokenization_python_rust_equals()
def __UpperCAmelCase ( self : str ) -> str:
# CLIP always lower cases letters
pass
| 107 |
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
lowerCamelCase :List[str] = logging.get_logger(__name__)
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Dict = 'AutoTokenizer'
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['tokenizer']
__SCREAMING_SNAKE_CASE : Tuple = {
'semantic_prompt': 1,
'coarse_prompt': 2,
'fine_prompt': 2,
}
def __init__(self , lowercase , lowercase=None ):
super().__init__(lowercase )
A_ : Any = speaker_embeddings
@classmethod
def _a (cls , lowercase , lowercase="speaker_embeddings_path.json" , **lowercase ):
if speaker_embeddings_dict_path is not None:
A_ : Any = get_file_from_repo(
lowercase , lowercase , subfolder=kwargs.pop("""subfolder""" , lowercase ) , cache_dir=kwargs.pop("""cache_dir""" , lowercase ) , force_download=kwargs.pop("""force_download""" , lowercase ) , proxies=kwargs.pop("""proxies""" , lowercase ) , resume_download=kwargs.pop("""resume_download""" , lowercase ) , local_files_only=kwargs.pop("""local_files_only""" , lowercase ) , use_auth_token=kwargs.pop("""use_auth_token""" , lowercase ) , revision=kwargs.pop("""revision""" , lowercase ) , )
if speaker_embeddings_path is None:
logger.warning(
F'`{os.path.join(lowercase , lowercase )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' )
A_ : str = None
else:
with open(lowercase ) as speaker_embeddings_json:
A_ : List[str] = json.load(lowercase )
else:
A_ : str = None
A_ : int = AutoTokenizer.from_pretrained(lowercase , **lowercase )
return cls(tokenizer=lowercase , speaker_embeddings=lowercase )
def _a (self , lowercase , lowercase="speaker_embeddings_path.json" , lowercase="speaker_embeddings" , lowercase = False , **lowercase , ):
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(lowercase , lowercase , """v2""" ) , exist_ok=lowercase )
A_ : Optional[int] = {}
A_ : Tuple = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
A_ : Union[str, Any] = self._load_voice_preset(lowercase )
A_ : Tuple = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["""repo_or_path"""] , lowercase , F'{prompt_key}_{key}' ) , voice_preset[key] , allow_pickle=lowercase , )
A_ : List[str] = os.path.join(lowercase , F'{prompt_key}_{key}.npy' )
A_ : str = tmp_dict
with open(os.path.join(lowercase , lowercase ) , """w""" ) as fp:
json.dump(lowercase , lowercase )
super().save_pretrained(lowercase , lowercase , **lowercase )
def _a (self , lowercase = None , **lowercase ):
A_ : List[Any] = self.speaker_embeddings[voice_preset]
A_ : Optional[Any] = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' )
A_ : int = get_file_from_repo(
self.speaker_embeddings.get("""repo_or_path""" , """/""" ) , voice_preset_paths[key] , subfolder=kwargs.pop("""subfolder""" , lowercase ) , cache_dir=kwargs.pop("""cache_dir""" , lowercase ) , force_download=kwargs.pop("""force_download""" , lowercase ) , proxies=kwargs.pop("""proxies""" , lowercase ) , resume_download=kwargs.pop("""resume_download""" , lowercase ) , local_files_only=kwargs.pop("""local_files_only""" , lowercase ) , use_auth_token=kwargs.pop("""use_auth_token""" , lowercase ) , revision=kwargs.pop("""revision""" , lowercase ) , )
if path is None:
raise ValueError(
F'`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.' )
A_ : Tuple = np.load(lowercase )
return voice_preset_dict
def _a (self , lowercase = None ):
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F'Voice preset unrecognized, missing {key} as a key.' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
def __call__(self , lowercase=None , lowercase=None , lowercase="pt" , lowercase=256 , lowercase=False , lowercase=True , lowercase=False , **lowercase , ):
if voice_preset is not None and not isinstance(lowercase , lowercase ):
if (
isinstance(lowercase , lowercase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
A_ : Optional[int] = self._load_voice_preset(lowercase )
else:
if isinstance(lowercase , lowercase ) and not voice_preset.endswith(""".npz""" ):
A_ : Optional[int] = voice_preset + """.npz"""
A_ : Any = np.load(lowercase )
if voice_preset is not None:
self._validate_voice_preset_dict(lowercase , **lowercase )
A_ : Optional[int] = BatchFeature(data=lowercase , tensor_type=lowercase )
A_ : Any = self.tokenizer(
lowercase , return_tensors=lowercase , padding="""max_length""" , max_length=lowercase , return_attention_mask=lowercase , return_token_type_ids=lowercase , add_special_tokens=lowercase , **lowercase , )
if voice_preset is not None:
A_ : Union[str, Any] = voice_preset
return encoded_text | 667 | 0 |
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase = PhobertTokenizer
_lowerCamelCase = False
def lowerCamelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_UpperCAmelCase = ["""T@@""", """i""", """I""", """R@@""", """r""", """e@@"""]
_UpperCAmelCase = dict(zip(lowerCamelCase , range(len(lowerCamelCase ) ) ) )
_UpperCAmelCase = ["""#version: 0.2""", """l à</w>"""]
_UpperCAmelCase = {"""unk_token""": """<unk>"""}
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
for token in vocab_tokens:
fp.write(f"""{token} {vocab_tokens[token]}\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCamelCase ) )
def lowerCamelCase ( self : Union[str, Any] , **lowerCamelCase : Optional[int] ) -> Any:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return PhobertTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase )
def lowerCamelCase ( self : Optional[Any] , lowerCamelCase : str ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = """Tôi là VinAI Research"""
_UpperCAmelCase = """T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"""
return input_text, output_text
def lowerCamelCase ( self : List[Any] ) -> int:
"""simple docstring"""
_UpperCAmelCase = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_UpperCAmelCase = """Tôi là VinAI Research"""
_UpperCAmelCase = """T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h""".split()
_UpperCAmelCase = tokenizer.tokenize(lowerCamelCase )
print(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
_UpperCAmelCase = tokens + [tokenizer.unk_token]
_UpperCAmelCase = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ) , lowerCamelCase ) | 108 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")
        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of random PIL images for the tests below."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def _a (self ):
A_ : str = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
A_ : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A_ : Optional[Any] = self.get_image_processor(do_normalize=lowercase , padding_value=1.0 )
A_ : str = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase )
self.assertIsInstance(processor.qformer_tokenizer , lowercase )
def _a (self ):
A_ : Any = self.get_image_processor()
A_ : Union[str, Any] = self.get_tokenizer()
A_ : List[str] = self.get_qformer_tokenizer()
A_ : int = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : List[Any] = self.prepare_image_inputs()
A_ : Union[str, Any] = image_processor(lowercase , return_tensors="""np""" )
A_ : Dict = processor(images=lowercase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a (self ):
A_ : List[Any] = self.get_image_processor()
A_ : Optional[Any] = self.get_tokenizer()
A_ : Any = self.get_qformer_tokenizer()
A_ : List[str] = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : str = """lower newer"""
A_ : List[Any] = processor(text=lowercase )
A_ : Optional[int] = tokenizer(lowercase , return_token_type_ids=lowercase )
A_ : List[Any] = qformer_tokenizer(lowercase , return_token_type_ids=lowercase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] )
def _a (self ):
A_ : int = self.get_image_processor()
A_ : Union[str, Any] = self.get_tokenizer()
A_ : Union[str, Any] = self.get_qformer_tokenizer()
A_ : Any = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : Optional[int] = """lower newer"""
A_ : Optional[int] = self.prepare_image_inputs()
A_ : Tuple = processor(text=lowercase , images=lowercase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
# test if it raises when no input is passed
with pytest.raises(lowercase ):
processor()
def _a (self ):
A_ : Dict = self.get_image_processor()
A_ : str = self.get_tokenizer()
A_ : Optional[int] = self.get_qformer_tokenizer()
A_ : int = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ : Optional[int] = processor.batch_decode(lowercase )
A_ : Dict = tokenizer.batch_decode(lowercase )
self.assertListEqual(lowercase , lowercase )
def _a (self ):
A_ : Any = self.get_image_processor()
A_ : Dict = self.get_tokenizer()
A_ : Union[str, Any] = self.get_qformer_tokenizer()
A_ : Optional[int] = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : List[Any] = """lower newer"""
A_ : Optional[Any] = self.prepare_image_inputs()
A_ : Any = processor(text=lowercase , images=lowercase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , ) | 667 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_informer": [
"INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
"INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"InformerForPrediction",
"InformerModel",
"InformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
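# The scaffold above is the standard transformers lazy-import pattern: at import
# time the module swaps itself in sys.modules for a _LazyModule that resolves the
# names declared in _import_structure only on first attribute access.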
| 109 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase :Optional[Any] = logging.get_logger(__name__)
lowerCamelCase :Tuple = {
'''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(self, image_size=[32, 128], patch_size=4, num_channels=3, max_token_length=27,
                 num_character_labels=38, num_bpe_labels=50257, num_wordpiece_labels=30522, hidden_size=768,
                 num_hidden_layers=12, num_attention_heads=12, mlp_ratio=4.0, qkv_bias=True, distilled=False,
                 layer_norm_eps=1e-5, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0,
                 output_a3_attentions=False, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range | 667 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key
    return orig_key


def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val
    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2
    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)
    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)
    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
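# Example invocation (file names are placeholders; the flags match the argparse
# definitions below):
#     python convert_yoso_checkpoint.py --pytorch_model_path yoso.ckpt \
#         --config_file config.json --pytorch_dump_path ./yoso-converted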
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''', default=None, type=str, required=True, help='''Path to YOSO pytorch checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for YOSO model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 227 |
'''simple docstring'''
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Finds a root of ``function`` via the secant method, starting from x0 and x1."""
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5
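# Sanity check (not part of the original file): f(x) = x**3 - 2*x - 5 has its
# single real root near x ≈ 2.0945515, so the call below should print roughly
# that value.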
if __name__ == "__main__":
print(intersection(f, 3, 3.5)) | 667 | 0 |
'''simple docstring'''
def encrypt(input_string: str, key: int) -> str:
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string
    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)
    return output_string


def decrypt(input_string: str, key: int) -> str:
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string
    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)
    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Uses decrypt on every key from 1 to len(input_string) - 1."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
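# Illustrative round trip (plaintext chosen here, not part of the original module):
#     cipher = encrypt("WEAREDISCOVEREDFLEEATONCE", 3)
#     assert decrypt(cipher, 3) == "WEAREDISCOVEREDFLEEATONCE"
#     bruteforce(cipher)[3]  # recovers the plaintext without knowing the key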
if __name__ == "__main__":
import doctest
doctest.testmod()
| 640 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCamelCase :Tuple = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, do_convert_rgb=True, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
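    # The preprocess entry point below applies the enabled steps in a fixed order:
    # RGB conversion -> numpy conversion -> resize (shortest edge) -> center crop
    # -> rescale -> normalize -> channel-first layout.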
    def preprocess(self, images, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, do_convert_rgb=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors) | 667 | 0 |
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def _A ( SCREAMING_SNAKE_CASE ):
UpperCAmelCase__: Dict = parser.add_argument_group("quant_trainer arguments" )
group.add_argument("--wprec" ,type=lowerCamelCase__ ,default=8 ,help="weight precision" )
group.add_argument("--aprec" ,type=lowerCamelCase__ ,default=8 ,help="activation precision" )
group.add_argument("--quant-per-tensor" ,action="store_true" ,help="per tensor weight scaling" )
group.add_argument("--quant-disable" ,action="store_true" ,help="disable all quantizers" )
group.add_argument("--quant-disable-embeddings" ,action="store_true" ,help="disable all embeddings quantizers" )
group.add_argument("--quant-disable-keyword" ,type=lowerCamelCase__ ,nargs="+" ,help="disable quantizers by keyword" )
group.add_argument("--quant-disable-layer-module" ,type=lowerCamelCase__ ,help="disable quantizers by keyword under layer." )
group.add_argument("--quant-enable-layer-module" ,type=lowerCamelCase__ ,help="enable quantizers by keyword under layer" )
group.add_argument("--calibrator" ,default="max" ,help="which quantization range calibrator to use" )
group.add_argument("--percentile" ,default=lowerCamelCase__ ,type=lowerCamelCase__ ,help="percentile for PercentileCalibrator" )
group.add_argument("--fuse-qkv" ,action="store_true" ,help="use the same scale factor for qkv" )
group.add_argument("--clip-gelu" ,metavar="N" ,type=lowerCamelCase__ ,help="clip gelu output maximum value to N" )
group.add_argument(
"--recalibrate-weights" ,action="store_true" ,help=(
"recalibrate weight amaxes by taking the max of the weights."
" amaxes will be computed with the current quantization granularity (axis)."
) ,)
def _A ( SCREAMING_SNAKE_CASE ):
if args.calibrator == "max":
UpperCAmelCase__: Union[str, Any] = """max"""
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError("Specify --percentile when using percentile calibrator" )
UpperCAmelCase__: int = """histogram"""
elif args.calibrator == "mse":
UpperCAmelCase__: Dict = """histogram"""
else:
raise ValueError(f"Invalid calibrator {args.calibrator}" )
UpperCAmelCase__: int = QuantDescriptor(num_bits=args.aprec ,calib_method=lowerCamelCase__ )
UpperCAmelCase__: Optional[Any] = QuantDescriptor(num_bits=args.wprec ,axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(lowerCamelCase__ )
quant_nn.QuantLinear.set_default_quant_desc_weight(lowerCamelCase__ )
def _A ( SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE=False ,SCREAMING_SNAKE_CASE=False ):
logger.info("Configuring Model for Quantization" )
logger.info(f"using quantization package {pytorch_quantization.__file__}" )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(lowerCamelCase__ ,["embeddings"] ,which="weight" ,_disabled=lowerCamelCase__ )
if args.quant_disable:
set_quantizer_by_name(lowerCamelCase__ ,[""] ,_disabled=lowerCamelCase__ )
if args.quant_disable_keyword:
set_quantizer_by_name(lowerCamelCase__ ,args.quant_disable_keyword ,_disabled=lowerCamelCase__ )
if args.quant_disable_layer_module:
set_quantizer_by_name(lowerCamelCase__ ,[R"layer.\d+." + args.quant_disable_layer_module] ,_disabled=lowerCamelCase__ )
if args.quant_enable_layer_module:
set_quantizer_by_name(lowerCamelCase__ ,[R"layer.\d+." + args.quant_enable_layer_module] ,_disabled=lowerCamelCase__ )
if args.recalibrate_weights:
recalibrate_weights(lowerCamelCase__ )
if args.fuse_qkv:
fuse_qkv(lowerCamelCase__ ,lowerCamelCase__ )
if args.clip_gelu:
clip_gelu(lowerCamelCase__ ,args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(lowerCamelCase__ )
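# The next helper (role inferred from its log messages) flips every *_quantizer
# module into calibration mode: quantization is disabled while amax statistics
# are collected from forward passes.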
def _A ( SCREAMING_SNAKE_CASE ):
logger.info("Enabling Calibration" )
for name, module in model.named_modules():
if name.endswith("_quantizer" ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(f"{name:80}: {module}" )
def _A ( SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ):
logger.info("Loading calibrated amax" )
for name, module in model.named_modules():
if name.endswith("_quantizer" ):
if module._calibrator is not None:
if isinstance(module._calibrator ,calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax("percentile" ,percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(lowerCamelCase__ )
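# Typical workflow sketch for this file's helpers (all flattened to `_A` in this
# row; roles inferred from bodies and log messages): configure the model for
# quantization, enable calibration, run a few representative batches, then load
# the collected amax values and re-enable quantization before evaluation.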
def _A ( SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ):
def fusea(SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ):
for mod in [qq, qk, qv]:
if not hasattr(lowerCamelCase__ ,"_amax" ):
print(" WARNING: NO AMAX BUFFER" )
return
UpperCAmelCase__: List[Any] = qq._amax.detach().item()
UpperCAmelCase__: Optional[int] = qk._amax.detach().item()
UpperCAmelCase__: Dict = qv._amax.detach().item()
UpperCAmelCase__: Any = max(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
qq._amax.fill_(lowerCamelCase__ )
qk._amax.fill_(lowerCamelCase__ )
qv._amax.fill_(lowerCamelCase__ )
logger.info(f" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}" )
for name, mod in model.named_modules():
if name.endswith(".attention.self" ):
logger.info(f"FUSE_QKV: {name:{name_width}}" )
fusea(mod.matmul_q_input_quantizer ,mod.matmul_k_input_quantizer ,mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer ,mod.key._weight_quantizer ,mod.value._weight_quantizer )
def _A ( SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ):
for name, mod in model.named_modules():
if name.endswith(".output.dense" ) and not name.endswith("attention.output.dense" ):
UpperCAmelCase__: Optional[int] = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=lowerCamelCase__ )
UpperCAmelCase__: Dict = mod._input_quantizer._amax.data.detach().item()
logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}" )
def _A ( SCREAMING_SNAKE_CASE ):
for name, mod in model.named_modules():
if hasattr(lowerCamelCase__ ,"_weight_quantizer" ) and mod._weight_quantizer.axis is not None:
UpperCAmelCase__: Tuple = mod.weight.shape[0]
UpperCAmelCase__: Dict = mod._weight_quantizer._amax.detach()
UpperCAmelCase__: List[Any] = torch.ones(lowerCamelCase__ ,dtype=amax.dtype ,device=amax.device ) * amax
print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}" )
def _A ( SCREAMING_SNAKE_CASE ):
for name, mod in model.named_modules():
if hasattr(lowerCamelCase__ ,"_weight_quantizer" ):
if not hasattr(mod.weight_quantizer ,"_amax" ):
print("RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER" )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
UpperCAmelCase__: Dict = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
UpperCAmelCase__: Tuple = set(range(len(mod.weight.size() ) ) ) - axis_set
UpperCAmelCase__: int = pytorch_quantization.utils.reduce_amax(mod.weight ,axis=lowerCamelCase__ ,keepdims=lowerCamelCase__ ).detach()
logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}" )
UpperCAmelCase__: str = amax
def _A ( SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE=2_5 ,SCREAMING_SNAKE_CASE=1_8_0 ,SCREAMING_SNAKE_CASE=None ):
if ignore is None:
UpperCAmelCase__: int = []
elif not isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
UpperCAmelCase__: Union[str, Any] = [ignore]
UpperCAmelCase__: Optional[Any] = 0
for name, mod in model.named_modules():
if not hasattr(lowerCamelCase__ ,"weight" ):
continue
UpperCAmelCase__: List[str] = max(lowerCamelCase__ ,len(lowerCamelCase__ ) )
for name, mod in model.named_modules():
UpperCAmelCase__: Tuple = getattr(lowerCamelCase__ ,"_input_quantizer" ,lowerCamelCase__ )
UpperCAmelCase__: List[Any] = getattr(lowerCamelCase__ ,"_weight_quantizer" ,lowerCamelCase__ )
if not hasattr(lowerCamelCase__ ,"weight" ):
continue
if type(lowerCamelCase__ ) in ignore:
continue
if [True for s in ignore if type(lowerCamelCase__ ) is str and s in name]:
continue
UpperCAmelCase__: Optional[int] = f"Act:{input_q.extra_repr()}"
UpperCAmelCase__: Dict = f"Wgt:{weight_q.extra_repr()}"
UpperCAmelCase__: List[Any] = f"{name:{name_width}} {act_str} {wgt_str}"
if len(lowerCamelCase__ ) <= line_width:
logger.info(lowerCamelCase__ )
else:
logger.info(f"{name:{name_width}} {act_str}" )
logger.info(f"{' ':{name_width}} {wgt_str}" )
def _A ( SCREAMING_SNAKE_CASE ):
UpperCAmelCase__: int = 0
for name, mod in model.named_modules():
if isinstance(lowerCamelCase__ ,pytorch_quantization.nn.TensorQuantizer ):
print(f"{name:80} {mod}" )
count += 1
print(f"{count} TensorQuantizers found in model" )
def _A ( SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ):
UpperCAmelCase__: Dict = getattr(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
if quantizer_mod is not None:
assert hasattr(lowerCamelCase__ ,lowerCamelCase__ )
setattr(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
else:
logger.warning(f"{name} has no {quantizer}" )
def _A ( SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE="both" ,**SCREAMING_SNAKE_CASE ):
UpperCAmelCase__: List[str] = f"Warning: changing {which} quantizers of {name:{qname_width}}"
for k, v in kwargs.items():
s += f" {k}={v}"
if which in ["input", "both"]:
set_quantizer(lowerCamelCase__ ,lowerCamelCase__ ,"_input_quantizer" ,lowerCamelCase__ ,lowerCamelCase__ )
if which in ["weight", "both"]:
set_quantizer(lowerCamelCase__ ,lowerCamelCase__ ,"_weight_quantizer" ,lowerCamelCase__ ,lowerCamelCase__ )
logger.info(lowerCamelCase__ )
def _A ( SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,**SCREAMING_SNAKE_CASE ):
for name, mod in model.named_modules():
if hasattr(lowerCamelCase__ ,"_input_quantizer" ) or hasattr(lowerCamelCase__ ,"_weight_quantizer" ):
for n in names:
if re.search(lowerCamelCase__ ,lowerCamelCase__ ):
set_quantizers(lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ )
elif name.endswith("_quantizer" ):
for n in names:
if re.search(lowerCamelCase__ ,lowerCamelCase__ ):
UpperCAmelCase__: Dict = f"Warning: changing {name:{name_width}}"
for k, v in kwargs.items():
s += f" {k}={v}"
setattr(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
logger.info(lowerCamelCase__ ) | 113 |
'''simple docstring'''
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
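# Usage sketch (illustrative data, not from the original):
#     foods = build_menu(["Burger", "Pizza", "Cola"], [80, 100, 60], [40, 10, 20])
#     taken, value = greedy(foods, max_cost=60, key_func=Things.get_value)
#     # picks Pizza then Burger (weights 10 + 40 <= 60) for a total value of 180.0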
def test_greedy():
    """Doctest placeholder: the original module exercises build_menu/greedy here via doctests."""
if __name__ == "__main__":
import doctest
doctest.testmod() | 667 | 0 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(self, backbone_config=None, hidden_size=512, initializer_range=0.02,
                 pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4,
                 auxiliary_in_channels=384, auxiliary_channels=256, auxiliary_num_convs=1,
                 auxiliary_concat_input=False, loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
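# Construction sketch: UperNetConfig() falls back to a ResNet backbone, so
# UperNetConfig().to_dict()["backbone_config"]["model_type"] == "resnet".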
| 231 |
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)
name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = parser.add_argument_group("""quant_trainer arguments""" )
group.add_argument("""--wprec""" , type=lowerCamelCase__ , default=8 , help="""weight precision""" )
group.add_argument("""--aprec""" , type=lowerCamelCase__ , default=8 , help="""activation precision""" )
group.add_argument("""--quant-per-tensor""" , action="""store_true""" , help="""per tensor weight scaling""" )
group.add_argument("""--quant-disable""" , action="""store_true""" , help="""disable all quantizers""" )
group.add_argument("""--quant-disable-embeddings""" , action="""store_true""" , help="""disable all embeddings quantizers""" )
group.add_argument("""--quant-disable-keyword""" , type=lowerCamelCase__ , nargs="""+""" , help="""disable quantizers by keyword""" )
group.add_argument("""--quant-disable-layer-module""" , type=lowerCamelCase__ , help="""disable quantizers by keyword under layer.""" )
group.add_argument("""--quant-enable-layer-module""" , type=lowerCamelCase__ , help="""enable quantizers by keyword under layer""" )
group.add_argument("""--calibrator""" , default="""max""" , help="""which quantization range calibrator to use""" )
group.add_argument("""--percentile""" , default=lowerCamelCase__ , type=lowerCamelCase__ , help="""percentile for PercentileCalibrator""" )
group.add_argument("""--fuse-qkv""" , action="""store_true""" , help="""use the same scale factor for qkv""" )
group.add_argument("""--clip-gelu""" , metavar="""N""" , type=lowerCamelCase__ , help="""clip gelu output maximum value to N""" )
group.add_argument(
"""--recalibrate-weights""" , action="""store_true""" , help=(
"""recalibrate weight amaxes by taking the max of the weights."""
""" amaxes will be computed with the current quantization granularity (axis)."""
) , )
def a ( lowerCamelCase__ ):
'''simple docstring'''
if args.calibrator == "max":
A_ : Union[str, Any] = """max"""
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError("""Specify --percentile when using percentile calibrator""" )
A_ : int = """histogram"""
elif args.calibrator == "mse":
A_ : Dict = """histogram"""
else:
raise ValueError(f'Invalid calibrator {args.calibrator}' )
A_ : int = QuantDescriptor(num_bits=args.aprec , calib_method=lowerCamelCase__ )
A_ : Optional[Any] = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(lowerCamelCase__ )
quant_nn.QuantLinear.set_default_quant_desc_weight(lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False , lowerCamelCase__=False ):
'''simple docstring'''
logger.info("""Configuring Model for Quantization""" )
logger.info(f'using quantization package {pytorch_quantization.__file__}' )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(lowerCamelCase__ , ["""embeddings"""] , which="""weight""" , _disabled=lowerCamelCase__ )
if args.quant_disable:
set_quantizer_by_name(lowerCamelCase__ , [""""""] , _disabled=lowerCamelCase__ )
if args.quant_disable_keyword:
set_quantizer_by_name(lowerCamelCase__ , args.quant_disable_keyword , _disabled=lowerCamelCase__ )
if args.quant_disable_layer_module:
set_quantizer_by_name(lowerCamelCase__ , [r"""layer.\d+.""" + args.quant_disable_layer_module] , _disabled=lowerCamelCase__ )
if args.quant_enable_layer_module:
set_quantizer_by_name(lowerCamelCase__ , [r"""layer.\d+.""" + args.quant_enable_layer_module] , _disabled=lowerCamelCase__ )
if args.recalibrate_weights:
recalibrate_weights(lowerCamelCase__ )
if args.fuse_qkv:
fuse_qkv(lowerCamelCase__ , lowerCamelCase__ )
if args.clip_gelu:
clip_gelu(lowerCamelCase__ , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(lowerCamelCase__ )
def a ( lowerCamelCase__ ):
'''simple docstring'''
logger.info("""Enabling Calibration""" )
for name, module in model.named_modules():
if name.endswith("""_quantizer""" ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(f'{name:80}: {module}' )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
logger.info("""Loading calibrated amax""" )
for name, module in model.named_modules():
if name.endswith("""_quantizer""" ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax("""percentile""" , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
def fusea(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
for mod in [qq, qk, qv]:
if not hasattr(lowerCamelCase__ , """_amax""" ):
print(""" WARNING: NO AMAX BUFFER""" )
return
A_ : List[Any] = qq._amax.detach().item()
A_ : Optional[int] = qk._amax.detach().item()
A_ : Dict = qv._amax.detach().item()
A_ : Any = max(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
qq._amax.fill_(lowerCamelCase__ )
qk._amax.fill_(lowerCamelCase__ )
qv._amax.fill_(lowerCamelCase__ )
logger.info(f' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}' )
for name, mod in model.named_modules():
if name.endswith(""".attention.self""" ):
logger.info(f'FUSE_QKV: {name:{name_width}}' )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
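# Why the q/k/v amaxes are fused: sharing one scale factor across the three
# projections lets the QKV GEMMs be fused at inference time (e.g. by TensorRT)
# without per-tensor rescaling.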
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if name.endswith(""".output.dense""" ) and not name.endswith("""attention.output.dense""" ):
A_ : Optional[int] = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=lowerCamelCase__ )
A_ : Dict = mod._input_quantizer._amax.data.detach().item()
logger.info(f'CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}' )
def a ( lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(lowerCamelCase__ , """_weight_quantizer""" ) and mod._weight_quantizer.axis is not None:
A_ : Tuple = mod.weight.shape[0]
A_ : Dict = mod._weight_quantizer._amax.detach()
A_ : List[Any] = torch.ones(lowerCamelCase__ , dtype=amax.dtype , device=amax.device ) * amax
print(f'expanding {name} {amax} -> {mod._weight_quantizer._amax}' )
def a ( lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(lowerCamelCase__ , """_weight_quantizer""" ):
if not hasattr(mod.weight_quantizer , """_amax""" ):
print("""RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER""" )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
A_ : Dict = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
A_ : Tuple = set(range(len(mod.weight.size() ) ) ) - axis_set
A_ : int = pytorch_quantization.utils.reduce_amax(mod.weight , axis=lowerCamelCase__ , keepdims=lowerCamelCase__ ).detach()
logger.info(f'RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}' )
A_ : str = amax
def a ( lowerCamelCase__ , lowerCamelCase__=25 , lowerCamelCase__=1_80 , lowerCamelCase__=None ):
'''simple docstring'''
if ignore is None:
A_ : int = []
elif not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Union[str, Any] = [ignore]
A_ : Optional[Any] = 0
for name, mod in model.named_modules():
if not hasattr(lowerCamelCase__ , """weight""" ):
continue
A_ : List[str] = max(lowerCamelCase__ , len(lowerCamelCase__ ) )
for name, mod in model.named_modules():
A_ : Tuple = getattr(lowerCamelCase__ , """_input_quantizer""" , lowerCamelCase__ )
A_ : List[Any] = getattr(lowerCamelCase__ , """_weight_quantizer""" , lowerCamelCase__ )
if not hasattr(lowerCamelCase__ , """weight""" ):
continue
if type(lowerCamelCase__ ) in ignore:
continue
if [True for s in ignore if type(lowerCamelCase__ ) is str and s in name]:
continue
A_ : Optional[int] = f'Act:{input_q.extra_repr()}'
A_ : Dict = f'Wgt:{weight_q.extra_repr()}'
A_ : List[Any] = f'{name:{name_width}} {act_str} {wgt_str}'
if len(lowerCamelCase__ ) <= line_width:
logger.info(lowerCamelCase__ )
else:
logger.info(f'{name:{name_width}} {act_str}' )
logger.info(f'{" ":{name_width}} {wgt_str}' )
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : int = 0
for name, mod in model.named_modules():
if isinstance(lowerCamelCase__ , pytorch_quantization.nn.TensorQuantizer ):
print(f'{name:80} {mod}' )
count += 1
print(f'{count} TensorQuantizers found in model' )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = getattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
if quantizer_mod is not None:
assert hasattr(lowerCamelCase__ , lowerCamelCase__ )
setattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
else:
logger.warning(f'{name} has no {quantizer}' )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="both" , **lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = f'Warning: changing {which} quantizers of {name:{qname_width}}'
for k, v in kwargs.items():
s += f' {k}={v}'
if which in ["input", "both"]:
set_quantizer(lowerCamelCase__ , lowerCamelCase__ , """_input_quantizer""" , lowerCamelCase__ , lowerCamelCase__ )
if which in ["weight", "both"]:
set_quantizer(lowerCamelCase__ , lowerCamelCase__ , """_weight_quantizer""" , lowerCamelCase__ , lowerCamelCase__ )
logger.info(lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(lowerCamelCase__ , """_input_quantizer""" ) or hasattr(lowerCamelCase__ , """_weight_quantizer""" ):
for n in names:
if re.search(lowerCamelCase__ , lowerCamelCase__ ):
set_quantizers(lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ )
elif name.endswith("""_quantizer""" ):
for n in names:
if re.search(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Dict = f'Warning: changing {name:{name_width}}'
for k, v in kwargs.items():
s += f' {k}={v}'
setattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
logger.info(lowerCamelCase__ ) | 667 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ : Optional[Any] =logging.get_logger(__name__)
lowerCAmelCase__ : List[Any] ={
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class RwkvConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = 'rwkv'
    attribute_map = {'max_position_embeddings': 'context_length'}

    def __init__(self, vocab_size=50_277, context_length=1_024, hidden_size=4_096, num_hidden_layers=32,
                 attention_hidden_size=None, intermediate_size=None, layer_norm_epsilon=1e-5, bos_token_id=0,
                 eos_token_id=0, rescale_every=6, tie_word_embeddings=False, use_cache=True, **kwargs):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
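# Construction sketch (follows directly from the defaults above): when left as
# None, attention_hidden_size falls back to hidden_size and intermediate_size to
# 4 * hidden_size, e.g. RwkvConfig(hidden_size=2048).intermediate_size == 8192.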
| 148 |
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
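# The suite below exercises AutoTokenizer resolution end to end: hub lookups,
# explicit tokenizer_type overrides, local files, custom tokenizer registration,
# and trust_remote_code handling.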
class _lowerCAmelCase ( unittest.TestCase ):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
@slow
def _a (self ):
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(lowercase ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
A_ : Tuple = AutoTokenizer.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(lowercase ) , 0 )
def _a (self ):
A_ : str = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def _a (self ):
A_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def _a (self ):
A_ : int = AutoConfig.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
# Check that tokenizer_type ≠ model_type
A_ : int = AutoTokenizer.from_pretrained(lowercase , config=lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def _a (self ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase , """vocab.txt""" ) )
A_ : Optional[Any] = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""bert""" , use_fast=lowercase )
self.assertIsInstance(lowercase , lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase , """merges.txt""" ) )
A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""gpt2""" , use_fast=lowercase )
self.assertIsInstance(lowercase , lowercase )
@require_tokenizers
def _a (self ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase , """vocab.txt""" ) )
A_ : Any = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""bert""" )
self.assertIsInstance(lowercase , lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase , """merges.txt""" ) )
A_ : int = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""gpt2""" )
self.assertIsInstance(lowercase , lowercase )
def _a (self ):
with pytest.raises(lowercase ):
AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" )
@require_tokenizers
def _a (self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
A_ : str = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
if isinstance(lowercase , lowercase ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , lowercase )
else:
self.assertEqual(tokenizer.do_lower_case , lowercase )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def _a (self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
lowercase , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ):
A_ : int = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" )
def _a (self ):
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
A_ : List[str] = TOKENIZER_MAPPING.values()
A_ : Optional[Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(lowercase )
@require_tokenizers
def _a (self ):
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=lowercase ) , lowercase )
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , lowercase )
@require_tokenizers
def _a (self ):
A_ : str = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=lowercase )
A_ : List[Any] = """Hello, world. How are you?"""
A_ : List[Any] = tokenizer.tokenize(lowercase )
self.assertEqual("""[UNK]""" , tokens[0] )
A_ : Dict = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=lowercase )
A_ : List[Any] = tokenizer.tokenize(lowercase )
self.assertEqual("""[UNK]""" , tokens[0] )
@require_tokenizers
def _a (self ):
A_ : Optional[int] = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" )
self.assertEqual(type(lowercase ) , lowercase )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 30000 )
self.assertEqual(tokenizer.unk_token , """[UNK]""" )
self.assertEqual(tokenizer.padding_side , """right""" )
self.assertEqual(tokenizer.truncation_side , """right""" )
def _a (self ):
A_ : Any = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : Tuple = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def _a (self ):
A_ : Union[str, Any] = AutoTokenizer.from_pretrained("""ctrl""" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(lowercase , lowercase )
def _a (self ):
# Check we can load the tokenizer config of an online model.
A_ : Tuple = get_tokenizer_config("""bert-base-cased""" )
A_ : Any = config.pop("""_commit_hash""" , lowercase )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(lowercase , {"""do_lower_case""": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
A_ : List[Any] = get_tokenizer_config(lowercase )
self.assertDictEqual(lowercase , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
A_ : int = AutoTokenizer.from_pretrained(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : Dict = get_tokenizer_config(lowercase )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" )
def _a (self ):
try:
AutoConfig.register("""custom""" , lowercase )
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase ):
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
A_ : Tuple = CustomTokenizer.from_pretrained(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : List[str] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def _a (self ):
try:
AutoConfig.register("""custom""" , lowercase )
# Can register in two steps
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
lowercase , slow_tokenizer_class=lowercase , fast_tokenizer_class=lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase ):
AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
A_ : str = BertTokenizerFast.from_pretrained(lowercase )
bert_tokenizer.save_pretrained(lowercase )
A_ : Optional[Any] = CustomTokenizerFast.from_pretrained(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : List[str] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase , use_fast=lowercase )
self.assertIsInstance(lowercase , lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _a (self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase ):
A_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase ):
A_ : Any = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
A_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : int = AutoTokenizer.from_pretrained(lowercase , trust_remote_code=lowercase )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
A_ : str = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : Any = AutoTokenizer.from_pretrained(lowercase , trust_remote_code=lowercase , use_fast=lowercase )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
@require_tokenizers
def _a (self ):
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Dict = False
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : str = NewTokenizer
__SCREAMING_SNAKE_CASE : Optional[Any] = False
try:
AutoConfig.register("""custom""" , lowercase )
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
# If remote code is not set, the default is to use local
A_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
A_ : int = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
A_ : int = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
A_ : List[Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
A_ : Any = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertTrue(tokenizer.special_attribute_present )
A_ : Union[str, Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _a (self ):
A_ : Dict = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
A_ : Optional[int] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def _a (self ):
with self.assertRaisesRegex(
lowercase , """bert-base is not a local folder and is not a valid model identifier""" ):
A_ : List[str] = AutoTokenizer.from_pretrained("""bert-base""" )
def _a (self ):
with self.assertRaisesRegex(
lowercase , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
A_ : Tuple = AutoTokenizer.from_pretrained(lowercase , revision="""aaaaaa""" )
def _a (self ):
# Make sure we have cached the tokenizer.
A_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
A_ : Tuple = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 ) | 667 | 0 |
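# A minimal, self-contained sketch of the registration pattern the tests above
# exercise; CustomConfig/CustomTokenizer here are illustrative stand-ins for
# user-defined classes, not names from this file.
from transformers import AutoConfig, AutoTokenizer, BertTokenizer, PretrainedConfig


class CustomConfig(PretrainedConfig):
    model_type = "custom"


class CustomTokenizer(BertTokenizer):
    pass


AutoConfig.register("custom", CustomConfig)
AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
# From here on, AutoTokenizer.from_pretrained resolves checkpoints whose config
# is a CustomConfig to CustomTokenizer automatically.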
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
    from transformers import (
        AutoTokenizer,
        TFAutoModelForCausalLM,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSpeechSeq2Seq,
        TFAutoModelForVision2Seq,
        TFBartForConditionalGeneration,
        TFLogitsProcessorList,
        TFMinLengthLogitsProcessor,
        tf_top_k_top_p_filtering,
    )
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class UtilsFunctionsTest(unittest.TestCase ):
    def test_top_k_top_p_filtering(self ) -> Optional[int]:
'''simple docstring'''
        logits = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
            ], # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
            ], # cumulative prob of 5 highest values <= 0.6
            ] , dtype=tf.float32 , )
        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.int32 , ) # expected non filtered idx as noted above
        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] , dtype=tf.float32 , ) # expected non filtered values as noted above
        output = tf_top_k_top_p_filtering(logits , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
        non_inf_output = output[output != -float("inf" )]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output , tf.constant(-float("inf" ) , dtype=tf.float32 ) ) ) , dtype=tf.int32 , )
        tf.debugging.assert_near(non_inf_output , non_inf_expected_output , rtol=1e-12 )
        tf.debugging.assert_equal(non_inf_idx , non_inf_expected_idx )
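# A minimal sketch of how the filtering above is used when sampling: keep only
# the top-k / nucleus tokens, then draw from what is left (illustrative helper,
# not part of the test suite).
def _sample_next_token(logits):
    filtered_logits = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)
    return tf.random.categorical(filtered_logits, num_samples=1)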
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase , GenerationIntegrationTestsMixin ):
    # setting framework_dependent_parameters needs to be gated, just like its contents' imports
    if is_tf_available():
        framework_dependent_parameters = {
            "AutoModelForCausalLM": TFAutoModelForCausalLM,
            "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeq2Seq,
            "AutoModelForSeq2SeqLM": TFAutoModelForSeq2SeqLM,
            "AutoModelForVision2Seq": TFAutoModelForVision2Seq,
            "LogitsProcessorList": TFLogitsProcessorList,
            "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
            "create_tensor_fn": tf.convert_to_tensor,
            "floats_tensor": floats_tensor,
            "return_tensors": "tf",
        }
@slow
    def test_generate_tf_function_export_fixed_input_length(self ) -> Optional[int]:
        '''simple docstring'''
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module ):
            def __init__(self , model ) -> Optional[int]:
                '''simple docstring'''
                super(DummyModel , self ).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length) , tf.int32 , name="input_ids" ),
                    tf.TensorSpec((None, input_length) , tf.int32 , name="attention_mask" ),
                ) , jit_compile=True , )
            def serving(self , input_ids , attention_mask ) -> Optional[int]:
                '''simple docstring'''
                outputs = self.model.generate(
                    input_ids=input_ids , attention_mask=attention_mask , max_new_tokens=max_new_tokens , return_dict_in_generate=True , )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [1_02, 1_03]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model , tmp_dir , signatures={"serving_default": dummy_model.serving} )
            serving_func = tf.saved_model.load(tmp_dir ).signatures["""serving_default"""]
            for batch_size in range(1 , len(dummy_input_ids ) + 1 ):
                inputs = {
                    """input_ids""": tf.constant(dummy_input_ids[:batch_size] ),
                    """attention_mask""": tf.constant(dummy_attention_masks[:batch_size] ),
                }
                tf_func_outputs = serving_func(**inputs )["""sequences"""]
                tf_model_outputs = test_model.generate(**inputs , max_new_tokens=max_new_tokens )
                tf.debugging.assert_equal(tf_func_outputs , tf_model_outputs )
@slow
    def test_generate_tf_function_export_fixed_batch_size(self ) -> Any:
        '''simple docstring'''
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module ):
            def __init__(self , model ) -> List[str]:
                '''simple docstring'''
                super(DummyModel , self ).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None) , tf.int32 , name="input_ids" ),
                    tf.TensorSpec((batch_size, None) , tf.int32 , name="attention_mask" ),
                ) , jit_compile=True , )
            def serving(self , input_ids , attention_mask ) -> str:
                '''simple docstring'''
                outputs = self.model.generate(
                    input_ids=input_ids , attention_mask=attention_mask , max_new_tokens=max_new_tokens , return_dict_in_generate=True , )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [1_02, 1_03]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model , tmp_dir , signatures={"serving_default": dummy_model.serving} )
            serving_func = tf.saved_model.load(tmp_dir ).signatures["""serving_default"""]
            for input_row in range(len(dummy_input_ids ) ):
                inputs = {
                    """input_ids""": tf.constant([dummy_input_ids[input_row]] ),
                    """attention_mask""": tf.constant([dummy_attention_masks[input_row]] ),
                }
                tf_func_outputs = serving_func(**inputs )["""sequences"""]
                tf_model_outputs = test_model.generate(**inputs , max_new_tokens=max_new_tokens )
                tf.debugging.assert_equal(tf_func_outputs , tf_model_outputs )
@slow
@require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self ) -> Optional[int]:
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small" , filename="spiece.model" , local_dir=tmp_dir )

            class CompleteSentenceTransformer(tf.keras.layers.Layer ):
                def __init__(self ) -> int:
                    '''simple docstring'''
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir , "spiece.model" ) , "rb" ).read() )
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5" )

                def call(self , inputs , *args , **kwargs ) -> Optional[Any]:
                    '''simple docstring'''
                    tokens = self.tokenizer.tokenize(inputs )
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
                    outputs = self.model.generate(input_ids=input_ids , attention_mask=attention_mask )
                    return self.tokenizer.detokenize(outputs )

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="inputs" )
            outputs = complete_model(inputs )
            keras_model = tf.keras.Model(inputs , outputs )
            keras_model.save(tmp_dir )
    def test_eos_token_id_int_and_list_top_k_top_sampling(self ) -> List[Any]:
        '''simple docstring'''
        generation_kwargs = {
            """do_sample""": True,
            """num_beams""": 1,
            """top_p""": 0.7,
            """top_k""": 10,
            """temperature""": 0.7,
        }
        expectation = 14
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        text = """Hello, my dog is cute and"""
        tokens = tokenizer(text , return_tensors="tf" )
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        eos_token_id = 6_38
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0" ):
            tf.random.set_seed(0 )
            generated_tokens = model.generate(**tokens , eos_token_id=eos_token_id , **generation_kwargs )
        self.assertTrue(expectation == len(generated_tokens[0] ) )
        eos_token_id = [6_38, 1_98]
        with tf.device(":/CPU:0" ):
            tf.random.set_seed(0 )
            generated_tokens = model.generate(**tokens , eos_token_id=eos_token_id , **generation_kwargs )
        self.assertTrue(expectation == len(generated_tokens[0] ) )
    def test_model_kwarg_encoder_signature_filtering(self ) -> List[str]:
        '''simple docstring'''
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart" )
        article = """Hugging Face is a technology company based in New York and Paris."""
        input_ids = bart_tokenizer(article , return_tensors="tf" ).input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart" )
        output = bart_model.generate(input_ids ).numpy()

        class FakeBart(TFBartForConditionalGeneration ):
            def call(self , input_ids , foo=None , **kwargs ) -> Dict:
                '''simple docstring'''
                return super().call(input_ids , **kwargs )

        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart" )
        output_2 = bart_model.generate(input_ids , foo="bar" ).numpy()
        self.assertTrue(np.array_equal(output , output_2 ) )

        class FakeEncoder(bart_model.model.encoder.__class__ ):
            def call(self , input_ids , **kwargs ) -> Optional[int]:
                '''simple docstring'''
                return super().call(input_ids , **kwargs )

        fake_encoder = FakeEncoder(bart_model.config , bart_model.model.shared )
        bart_model.model.encoder = fake_encoder
        # Normal generation still works (the output will be different because the encoder weights are different)
        output_3 = bart_model.generate(input_ids ).numpy()
        with self.assertRaises(ValueError ):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids , foo="bar" )
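# Sketch of the mechanism the last test relies on: generate() inspects the
# call() signature and only forwards the extra kwargs it actually accepts
# (illustrative helper, not transformers API).
import inspect

def _forwardable_kwargs(fn, **kwargs):
    params = inspect.signature(fn).parameters
    if any(p.kind == inspect.Parameter.VAR_KEYWORD for p in params.values()):
        return kwargs  # a **kwargs catch-all means nothing gets filtered out
    return {k: v for k, v in kwargs.items() if k in params}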
| 204 |
'''simple docstring'''
from __future__ import annotations
def a ( number_of_bytes , partitions ):
    '''simple docstring'''
    if partitions <= 0:
        raise ValueError("""partitions must be a positive number!""" )
    if partitions > number_of_bytes:
        raise ValueError("""partitions can not > number_of_bytes!""" )
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions ):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f'{start_bytes}-{end_bytes}' )
    return allocation_list
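# Illustrative usage (arguments are number_of_bytes, partitions), e.g. splitting
# 100 bytes across 4 download ranges:
#     >>> a(100, 4)
#     ['1-25', '26-50', '51-75', '76-100']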
if __name__ == "__main__":
import doctest
doctest.testmod() | 667 | 0 |
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = '''<<<<<<< This should probably be modified because it mentions: '''
HIGHLIGHT_MESSAGE_POST = '''=======
>>>>>>>
'''
TO_HIGHLIGHT = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(R'''tfds\.core''', R'''datasets'''),
(R'''tf\.io\.gfile\.GFile''', R'''open'''),
(R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
(R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
(R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
(R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
(R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
(R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(R'''tfds\.''', R'''datasets.'''),
(R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
(R'''self\.builder_config''', R'''self.config'''),
]
def convert_command_factory(args ) -> List[str]:
    return ConvertCommand(args.tfds_path , args.datasets_directory )
class ConvertCommand( BaseDatasetsCLICommand ):
    @staticmethod
    def register_subcommand( parser ) -> List[Any]:
        """simple docstring"""
        train_parser = parser.add_parser(
            '''convert''' ,help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' ,)
        train_parser.add_argument(
            '''--tfds_path''' ,type=str ,required=True ,help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' ,)
        train_parser.add_argument(
            '''--datasets_directory''' ,type=str ,required=True ,help='''Path to the HuggingFace Datasets folder.''' )
        train_parser.set_defaults(func=convert_command_factory )
    def __init__( self ,tfds_path ,datasets_directory ,*args ) -> Any:
        """simple docstring"""
        self._logger = get_logger('''datasets-cli/converting''' )
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run( self ) -> Dict:
        """simple docstring"""
        if os.path.isdir(self._tfds_path ):
            abs_tfds_path = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            abs_tfds_path = os.path.dirname(self._tfds_path )
        else:
            raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
        abs_datasets_path = os.path.abspath(self._datasets_directory )
        self._logger.info(f'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path ):
            file_names = os.listdir(abs_tfds_path )
        else:
            file_names = [os.path.basename(self._tfds_path )]
        for f_name in file_names:
            self._logger.info(f'''Looking at file {f_name}''' )
            input_file = os.path.join(abs_tfds_path ,f_name )
            output_file = os.path.join(abs_datasets_path ,f_name )
            if not os.path.isfile(input_file ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info('''Skipping file''' )
                continue
            with open(input_file ,encoding='''utf-8''' ) as f:
                lines = f.readlines()
            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = """import datasets\n"""
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = """"""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = """from datasets import logging\n"""
                elif "getLogger" in out_line:
                    out_line = out_line.replace('''getLogger''' ,'''get_logger''' )
                elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    needs_manual_update = True
                    to_highlight = list(filter(lambda e : e in out_line ,TO_HIGHLIGHT ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_highlight ) + '''\n''' )
                    out_lines.append(out_line )
                    out_lines.append(HIGHLIGHT_MESSAGE_POST )
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern ,replacement ,out_line )
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(R'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' ,out_line )
                    tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
                    out_line = """from . import """ + match.group(1 )
                # Check we have not forgotten anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f'''Error converting {out_line.strip()}''' )
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line )
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dataset_name = f_name.replace('''.py''' ,'''''' )
                output_dir = os.path.join(abs_datasets_path ,dataset_name )
                output_file = os.path.join(output_dir ,f_name )
                os.makedirs(output_dir ,exist_ok=True )
                self._logger.info(f'''Adding directory {output_dir}''' )
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file )
            if needs_manual_update:
                with_manual_update.append(output_file )
            with open(output_file ,'''w''' ,encoding='''utf-8''' ) as f:
                f.writelines(out_lines )
            self._logger.info(f'''Converted in {output_file}''' )
        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file )
                dest_folder = imports_to_builder_map[f_name.replace('''.py''' ,'''''' )]
                self._logger.info(f'''Moving {utils_file} to {dest_folder}''' )
                shutil.copy(utils_file ,dest_folder )
            except KeyError:
                self._logger.error(f'''Cannot find destination folder for {utils_file}. Please copy manually.''' )
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' ) | 498 |
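# A small illustration of how the (pattern, replacement) pairs in TO_CONVERT
# above rewrite one tfds line into its datasets equivalent.
import re

line = "tfds.features.Text()"
line = re.sub(r"tfds\.features\.Text\(\)", r"datasets.Value('string')", line)
assert line == "datasets.Value('string')"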
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase :Any = logging.get_logger(__name__)
def create_rename_keys(encoder_config , decoder_config ):
    '''simple docstring'''
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm1.weight', f'encoder.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm1.bias', f'encoder.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.weight', f'encoder.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.bias', f'encoder.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm2.weight', f'encoder.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm2.bias', f'encoder.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.weight', f'encoder.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.bias', f'encoder.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc2.weight', f'encoder.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.mlp.fc2.bias', f'encoder.encoder.layer.{i}.output.dense.bias') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict , encoder_config ):
    '''simple docstring'''
    for i in range(encoder_config.num_hidden_layers ):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f'encoder.deit.blocks.{i}.attn.qkv.weight' )
        state_dict[f'encoder.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f'encoder.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f'encoder.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def prepare_img(checkpoint_url ):
    '''simple docstring'''
    if "handwritten" in checkpoint_url:
        url = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg""" # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw ).convert("""RGB""" )
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url , pytorch_dump_folder_path ):
    '''simple docstring'''
    encoder_config = ViTConfig(image_size=3_84 , qkv_bias=False )
    decoder_config = TrOCRConfig()
    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 7_68
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 10_24
        encoder_config.intermediate_size = 40_96
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 10_24
    else:
        raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" )
    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = """relu"""
        decoder_config.max_position_embeddings = 10_24
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False
    # load HuggingFace model
    encoder = ViTModel(encoder_config , add_pooling_layer=False )
    decoder = TrOCRForCausalLM(decoder_config )
    model = VisionEncoderDecoderModel(encoder=encoder , decoder=decoder )
    model.eval()
    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" , check_hash=True )["""model"""]
    rename_keys = create_rename_keys(encoder_config , decoder_config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , encoder_config )
    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]
    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith("""decoder""" ) and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val
    # load state dict
    model.load_state_dict(state_dict )
    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size )
    tokenizer = RobertaTokenizer.from_pretrained("""roberta-large""" )
    processor = TrOCRProcessor(image_processor , tokenizer )
    pixel_values = processor(images=prepare_img(checkpoint_url ) , return_tensors="""pt""" ).pixel_values
    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
    outputs = model(pixel_values=pixel_values , decoder_input_ids=decoder_input_ids )
    logits = outputs.logits
    expected_shape = torch.Size([1, 1, 5_02_65] )
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4_502, -4.6_683, -0.5_347, -2.9_291, 9.1_435, -3.0_571, 8.9_764, 1.7_560, 8.7_358, -1.5_311] )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6_437, -1.3_129, -2.2_596, -5.3_455, 6.3_539, 1.7_604, 5.4_991, 1.4_702, 5.6_113, 2.0_170] )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6_816, -5.8_388, 1.1_398, -6.9_034, 6.8_505, -2.4_393, 1.2_284, -1.0_232, -1.9_661, -3.9_210] )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0_162, -7.0_959, 4.4_155, -5.1_063, 7.0_468, -3.1_631, 2.6_466, -0.3_081, -0.8_106, -1.7_535] )
    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10] , expected_slice , atol=1E-3 ), "First elements of logits not as expected"
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'Saving model to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'Saving processor to {pytorch_dump_folder_path}' )
    processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 667 | 0 |
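# Hypothetical smoke test for a converted checkpoint; the folder name below is
# an assumption and should match whatever --pytorch_dump_folder_path was used.
from PIL import Image
import requests
import torch
from transformers import TrOCRProcessor, VisionEncoderDecoderModel

processor = TrOCRProcessor.from_pretrained("./trocr-base-handwritten")
model = VisionEncoderDecoderModel.from_pretrained("./trocr-base-handwritten")
url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
pixel_values = processor(images=image, return_tensors="pt").pixel_values
with torch.no_grad():
    generated_ids = model.generate(pixel_values)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])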
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester( unittest.TestCase ):
    '''simple docstring'''
    def __init__( self, parent, batch_size=13, num_channels=3, image_size=224, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ) -> Tuple:
        size = size if size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
def __UpperCAmelCase ( self ) -> Union[str, Any]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class snake_case_ ( __UpperCAmelCase, unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = ViTImageProcessor if is_vision_available() else None
    def setUp( self ) -> Optional[Any]:
        self.image_proc_tester = EfficientFormerImageProcessorTester(self )
    @property
    def image_processor_dict( self ) -> List[Any]:
        return self.image_proc_tester.prepare_image_processor_dict()
    def test_image_proc_properties( self ) -> Optional[Any]:
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor, "image_mean" ) )
        self.assertTrue(hasattr(image_processor, "image_std" ) )
        self.assertTrue(hasattr(image_processor, "do_normalize" ) )
        self.assertTrue(hasattr(image_processor, "do_resize" ) )
        self.assertTrue(hasattr(image_processor, "size" ) )
    def test_batch_feature( self ) -> Tuple:
        pass
    def test_call_pil( self ) -> List[Any]:
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image )
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ), )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ), )
    def test_call_numpy( self ) -> List[str]:
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray )
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ), )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ), )
    def test_call_pytorch( self ) -> Tuple:
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor )
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ), )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ), )
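# A stand-alone sketch of what the tests above exercise: preprocessing one
# synthetic PIL image with ViTImageProcessor (the 18x18 target size mirrors
# the tester defaults above).
def _demo_vit_preprocess():
    image_processor = ViTImageProcessor(size={"height": 18, "width": 18})
    image = Image.fromarray(np.zeros((32, 32, 3), dtype=np.uint8))
    pixel_values = image_processor(image, return_tensors="pt").pixel_values
    assert pixel_values.shape == (1, 3, 18, 18)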
| 625 |
'''simple docstring'''
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))''')) | 667 | 0 |
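# The one-liner above is a quine: %r re-embeds the template string with its
# quotes, and %% collapses back to a single %, so the printed text reproduces
# the source exactly. A quick check:
src = "print((lambda quine: quine %% quine)(%r))"
assert src % src == "print((lambda quine: quine % quine)(" + repr(src) + "))"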
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/led-base-16384''': 1_63_84,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord('!') , ord('~') + 1)) + list(range(ord('¡') , ord('¬') + 1)) + list(range(ord('®') , ord('ÿ') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs , cs))
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LEDTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ) -> List[str]:
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False) if isinstance(bos_token , str) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False) if isinstance(eos_token , str) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False) if isinstance(sep_token , str) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False) if isinstance(cls_token , str) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False) if isinstance(unk_token , str) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False) if isinstance(pad_token , str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False) if isinstance(mask_token , str) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding='utf-8') as merges_handle:
            bpe_merges = merges_handle.read().split('\n')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+')
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size( self) -> Any:
        return len(self.encoder)
    def get_vocab( self) -> Dict:
        return dict(self.encoder , **self.added_tokens_encoder)
    def bpe( self , token) -> List[Any]:
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair: self.bpe_ranks.get(pair , float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first , i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = """ """.join(word)
        self.cache[token] = word
        return word
    def _tokenize( self , text) -> str:
        bpe_tokens = []
        for token in re.findall(self.pat , text):
            token = """""".join(
                self.byte_encoder[b] for b in token.encode('utf-8')) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens
    def _convert_token_to_id( self , token) -> List[Any]:
        return self.encoder.get(token , self.encoder.get(self.unk_token))
    def _convert_id_to_token( self , index) -> str:
        return self.decoder.get(index)
    def convert_tokens_to_string( self , tokens) -> Dict:
        text = """""".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8' , errors=self.errors)
        return text
    def save_vocabulary( self , save_directory , filename_prefix = None) -> Optional[Any]:
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file , 'w' , encoding='utf-8') as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False) + '\n')
        index = 0
        with open(merge_file , 'w' , encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None) -> Optional[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False) -> Any:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None) -> Any:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization( self , text , is_split_into_words=False , **kwargs) -> int:
        add_prefix_space = kwargs.pop('add_prefix_space' , self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = """ """ + text
        return (text, kwargs)
    def _pad( self , encoded_inputs , max_length = None , padding_strategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of = None , return_attention_mask = None , ) -> List[Any]:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = """attention_mask""" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['global_attention_mask']) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs['global_attention_mask'])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["""global_attention_mask"""] = (
                        encoded_inputs["""global_attention_mask"""] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["""global_attention_mask"""] = [-1] * difference + encoded_inputs[
                        """global_attention_mask"""
                    ]
                else:
                    raise ValueError('Invalid padding strategy:' + str(self.padding_side))
        return encoded_inputs
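# A small stand-alone sketch of the padding rule implemented in _pad above:
# when inputs grow by padding, `global_attention_mask` is extended with -1
# (not 0), because 0 already means "local attention" for LED.
def _demo_global_attention_padding():
    encoded = {"input_ids": [0, 713, 2], "global_attention_mask": [1, 0, 0]}
    max_length, padding_side = 5, "right"
    difference = max_length - len(encoded["global_attention_mask"])
    if padding_side == "right":
        encoded["global_attention_mask"] = encoded["global_attention_mask"] + [-1] * difference
    assert encoded["global_attention_mask"] == [1, 0, 0, -1, -1]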
| 73 |
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(R'''digital_image_processing/image_data/lena_small.jpg''')
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    '''simple docstring'''
    negative_img = cn.convert_to_negative(img )
    # assert negative_img array for at least one True
    assert negative_img.any()
def test_change_contrast():
    '''simple docstring'''
    with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img , 1_10 ) ).startswith(
            """<PIL.Image.Image image mode=RGB size=100x100 at""" )
def test_gen_gaussian_kernel():
    '''simple docstring'''
    resp = canny.gen_gaussian_kernel(9 , sigma=1.4 )
    # Assert ambiguous array
    assert resp.all()
def test_canny():
    '''simple docstring'''
    canny_img = imread("""digital_image_processing/image_data/lena_small.jpg""" , 0 )
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img )
    # assert canny array for at least one True
    assert canny_array.any()
def test_gen_gaussian_kernel_filter():
    '''simple docstring'''
    assert gg.gaussian_filter(gray , 5 , sigma=0.9 ).all()
def test_convolve_filter():
    '''simple docstring'''
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
    res = conv.img_convolve(gray , laplace ).astype(uint8 )
    assert res.any()
def test_median_filter():
    '''simple docstring'''
    assert med.median_filter(gray , 3 ).any()
def test_sobel_filter():
    '''simple docstring'''
    grad, theta = sob.sobel_filter(gray )
    assert grad.any() and theta.any()
def test_sepia():
    '''simple docstring'''
    sepia = sp.make_sepia(img , 20 )
    assert sepia.all()
def test_burkes(file_path = "digital_image_processing/image_data/lena_small.jpg" ):
    '''simple docstring'''
    burkes = bs.Burkes(imread(file_path , 1 ) , 1_20 )
    burkes.process()
    assert burkes.output_img.any()
def test_nearest_neighbour(file_path = "digital_image_processing/image_data/lena_small.jpg" , ):
    '''simple docstring'''
    nn = rs.NearestNeighbour(imread(file_path , 1 ) , 4_00 , 2_00 )
    nn.process()
    assert nn.output.any()
def test_local_binary_pattern():
    '''simple docstring'''
    file_path = """digital_image_processing/image_data/lena.jpg"""
    # Reading the image and converting it to grayscale.
    image = imread(file_path , 0 )
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image , x_coordinate , y_coordinate , center )
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]) )
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0 , image.shape[0] ):
        for j in range(0 , image.shape[1] ):
            lbp_image[i][j] = lbp.local_binary_value(image , i , j )
    assert lbp_image.any() | 667 | 0 |
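# The suite above mirrors TheAlgorithms/Python's digital image processing
# tests; the image paths are relative, so it is meant to be run from that
# repository root, e.g. `python -m pytest digital_image_processing/`.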
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor( ProcessorMixin ):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'BlipImageProcessor'
    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')
    def __init__( self , image_processor , tokenizer ) -> Optional[int]:
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , images: ImageInput = None , text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = None , max_length: Optional[int] = None , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_token_type_ids: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError('You have to specify either images or text.' )
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding )
        return encoding_image_processor
    def batch_decode( self , *args , **kwargs ) -> str:
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ) -> Union[str, Any]:
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ) -> Union[str, Any]:
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
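# Hypothetical end-to-end use of the processor above; the checkpoint name is
# an assumption (any BLIP checkpoint bundling this processor works the same).
def _demo_blip_processor():
    from PIL import Image
    import numpy as np
    from transformers import BlipProcessor

    processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
    image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
    inputs = processor(images=image, text="a photo of", return_tensors="pt")
    return sorted(inputs.keys())  # pixel_values plus the text-encoding fields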
| 328 |
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
lowerCamelCase :Dict = get_logger(__name__)
class _PatchedModuleObj:
    """Patched module object so that attributes of submodules can be patched too."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module


class patch_submodule:
    """Temporarily patch an attribute (possibly of a submodule) as seen from `obj`."""

    _active_patches = []

    def __init__(self, obj, target, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(F'Tried to patch attribute {target_attr} instead of a submodule.')

    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__() | 667 | 0 |
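# Self-contained usage sketch for `patch_submodule` above (mirroring how the
# datasets test-suite exercises it): patch `os.path.join` as seen from the
# `shutil` module, then verify the original is restored on exit.
import shutil  # shutil does `import os` at module level, so it is a valid target

def _fake_join(*parts):
    return "!!".join(parts)

with patch_submodule(shutil, "os.path.join", _fake_join):
    assert shutil.os.path.join("a", "b") == "a!!b"  # patched view inside shutil
import os as _os
assert "!!" not in _os.path.join("a", "b")  # the real os.path.join is untouched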
"""simple docstring"""
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a = {
'''facebook/mask2former-swin-small-coco-instance''': (
'''https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'''
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
a = logging.get_logger(__name__)
class lowercase_ ( PretrainedConfig ):
'''simple docstring'''
UpperCAmelCase : int = 'mask2former'
UpperCAmelCase : Optional[Any] = ['swin']
UpperCAmelCase : Any = {'hidden_size': 'hidden_dim'}
def __init__( self : List[str] , _UpperCAmelCase : Dict = None , _UpperCAmelCase : Optional[Any] = 256 , _UpperCAmelCase : Any = 256 , _UpperCAmelCase : Optional[Any] = 256 , _UpperCAmelCase : Tuple = 1_024 , _UpperCAmelCase : List[str] = "relu" , _UpperCAmelCase : Tuple = 6 , _UpperCAmelCase : str = 10 , _UpperCAmelCase : Dict = 8 , _UpperCAmelCase : str = 0.0 , _UpperCAmelCase : Union[str, Any] = 2_048 , _UpperCAmelCase : Union[str, Any] = False , _UpperCAmelCase : Optional[int] = False , _UpperCAmelCase : List[Any] = 4 , _UpperCAmelCase : List[str] = 255 , _UpperCAmelCase : Any = 100 , _UpperCAmelCase : List[Any] = 0.1 , _UpperCAmelCase : Tuple = 2.0 , _UpperCAmelCase : str = 5.0 , _UpperCAmelCase : str = 5.0 , _UpperCAmelCase : Any = 12_544 , _UpperCAmelCase : Dict = 3.0 , _UpperCAmelCase : Any = 0.75 , _UpperCAmelCase : Optional[Any] = 0.02 , _UpperCAmelCase : Optional[Any] = 1.0 , _UpperCAmelCase : Optional[Any] = True , _UpperCAmelCase : Any = [4, 8, 16, 32] , _UpperCAmelCase : Optional[int] = None , **_UpperCAmelCase : int , ):
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' )
_A = CONFIG_MAPPING["""swin"""](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=_UpperCAmelCase , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_A = backbone_config.pop('model_type' )
_A = CONFIG_MAPPING[backbone_model_type]
_A = config_class.from_dict(_UpperCAmelCase )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '''
F'''Supported model types: {",".join(self.backbones_supported )}''' )
_A = backbone_config
_A = feature_size
_A = mask_feature_size
_A = hidden_dim
_A = encoder_feedforward_dim
_A = activation_function
_A = encoder_layers
_A = decoder_layers
_A = num_attention_heads
_A = dropout
_A = dim_feedforward
_A = pre_norm
_A = enforce_input_projection
_A = common_stride
_A = ignore_value
_A = num_queries
_A = no_object_weight
_A = class_weight
_A = mask_weight
_A = dice_weight
_A = train_num_points
_A = oversample_ratio
_A = importance_sample_ratio
_A = init_std
_A = init_xavier_std
_A = use_auxiliary_loss
_A = feature_strides
_A = output_auxiliary_logits
_A = decoder_layers
super().__init__(**_UpperCAmelCase )
@classmethod
    def from_backbone_config(cls , backbone_config , **kwargs ):
        return cls(
            backbone_config=backbone_config , **kwargs , )

    def to_dict(self ):
        output = copy.deepcopy(self.__dict__ )
        output["""backbone_config"""] = self.backbone_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
| 7 |
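# Hedged usage sketch for the composite-config pattern above. Class and
# attribute names follow the released Mask2FormerConfig; treat the exact
# defaults as assumptions rather than guarantees:
#
#     from transformers import Mask2FormerConfig
#     config = Mask2FormerConfig()              # backbone_config falls back to a Swin config
#     d = config.to_dict()                      # the nested backbone config is serialized too
#     assert d["backbone_config"]["model_type"] == "swin"
#     restored = Mask2FormerConfig.from_dict(d)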
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_wav2vec2"""] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_wav2vec2"""] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_wav2vec2"""] = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 667 | 0 |
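# The init file above follows the transformers lazy-import convention: the
# module ships only an `_import_structure` mapping and resolves attributes on
# first access. A stripped-down, runnable sketch of the same idea:
import importlib
import types

class _DemoLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, item):
        # only triggered for names not already set on the module
        for submodule, names in self._import_structure.items():
            if item in names:
                return getattr(importlib.import_module(submodule), item)
        raise AttributeError(item)

_lazy = _DemoLazyModule("demo", {"json": ["dumps"]})
assert _lazy.dumps({"a": 1}) == '{"a": 1}'  # json imported lazily on first use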
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_wav2vec2"""] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_wav2vec2"""] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_wav2vec2"""] = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 227 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowerCAmelCase ( DiffusionPipeline ):
    def __init__(self , transformer , vae , scheduler , idalabel = None , ):
        super().__init__()
        self.register_modules(transformer=transformer , vae=vae , scheduler=scheduler )
        # create an imagenet label -> id dictionary for easier use
        self.labels = {}
        if idalabel is not None:
            for key, value in idalabel.items():
                for label in value.split(""",""" ):
                    self.labels[label.lstrip().rstrip()] = int(key )
            self.labels = dict(sorted(self.labels.items() ) )

    def get_label_ids(self , label ):
        if not isinstance(label , list ):
            label = list(label )
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    F'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.' )
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(self , class_labels , guidance_scale = 4.0 , generator = None , num_inference_steps = 50 , output_type = "pil" , return_dict = True , ):
        batch_size = len(class_labels )
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels
        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size) , generator=generator , device=self.device , dtype=self.transformer.dtype , )
        latent_model_input = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
        class_labels = torch.tensor(class_labels , device=self.device ).reshape(-1 )
        class_null = torch.tensor([1000] * batch_size , device=self.device )
        class_labels_input = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
        # set step values
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input ) // 2]
                latent_model_input = torch.cat([half, half] , dim=0 )
            latent_model_input = self.scheduler.scale_model_input(latent_model_input , t )
            timesteps = t
            if not torch.is_tensor(timesteps ):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == """mps"""
                if isinstance(timesteps , float ):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps] , dtype=dtype , device=latent_model_input.device )
            elif len(timesteps.shape ) == 0:
                timesteps = timesteps[None].to(latent_model_input.device )
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0] )
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input , timestep=timesteps , class_labels=class_labels_input ).sample
            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps , len(eps ) // 2 , dim=0 )
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps] , dim=0 )
                noise_pred = torch.cat([eps, rest] , dim=1 )
            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred , latent_channels , dim=1 )
            else:
                model_output = noise_pred
            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output , t , latent_model_input ).prev_sample
        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2 , dim=0 )
        else:
            latents = latent_model_input
        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents ).sample
        samples = (samples / 2 + 0.5).clamp(0 , 1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            samples = self.numpy_to_pil(samples )
        if not return_dict:
            return (samples,)
        return ImagePipelineOutput(images=samples ) | 667 | 0 |
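# Illustration of the classifier-free-guidance arithmetic used in the pipeline
# above: eps = uncond + s * (cond - uncond), applied on stacked [cond, uncond]
# batch halves. A standalone tensor sketch (shapes are illustrative only):
import torch

def apply_cfg(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    cond_eps, uncond_eps = torch.split(noise_pred, len(noise_pred) // 2, dim=0)
    half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
    return torch.cat([half_eps, half_eps], dim=0)

assert apply_cfg(torch.randn(4, 4, 8, 8), 4.0).shape == (4, 4, 8, 8)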
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    """Return all left- and right-truncations of n, including n itself."""
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    """Cheap pre-filter on the leading and trailing three digits."""
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    """Collect the first `count` primes that stay prime under every truncation."""
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    """Sum of the only eleven truncatable primes (Project Euler 37)."""
    return sum(compute_truncated_primes(11))
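# Sanity check for the helpers above: 3797 is one of the eleven truncatable
# primes -- every left and right truncation (797, 97, 7, 379, 37, 3) is prime.
assert is_prime(3797)
assert all(is_prime(n) for n in list_truncated_nums(3797))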
if __name__ == "__main__":
print(f'''{sum(compute_truncated_primes(11)) = }''')
| 640 |
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked=20):
    """Expected number of distinct colours among `num_picked` drawn balls (Project Euler 493)."""
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f'{result:.9f}'
if __name__ == "__main__":
print(solution(2_0)) | 667 | 0 |
import math
import sys


def minimum_squares_to_represent_a_number(number: int) -> int:
    """Count the minimum number of perfect squares that sum to `number`."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod() | 113 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase :List[Any] = logging.get_logger(__name__)
lowerCamelCase :Union[str, Any] = {
'''google/pix2struct-textcaps-base''': (
'''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'''
),
}
class _lowerCAmelCase ( PretrainedConfig ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'pix2struct_text_model'
__SCREAMING_SNAKE_CASE : Optional[int] = ['past_key_values']
__SCREAMING_SNAKE_CASE : List[Any] = {
'hidden_size': 'hidden_size',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__(self , lowercase=50244 , lowercase=768 , lowercase=64 , lowercase=2048 , lowercase=12 , lowercase=12 , lowercase=32 , lowercase=128 , lowercase=0.1 , lowercase=1E-6 , lowercase=1.0 , lowercase="gelu_new" , lowercase=0 , lowercase=False , lowercase=0 , lowercase=1 , lowercase=False , lowercase=True , **lowercase , ):
A_ : Tuple = vocab_size
A_ : str = hidden_size
A_ : Optional[Any] = d_kv
A_ : Tuple = d_ff
A_ : str = num_layers
A_ : int = num_heads
A_ : Dict = relative_attention_num_buckets
A_ : Optional[Any] = relative_attention_max_distance
A_ : Dict = dropout_rate
A_ : Optional[int] = layer_norm_epsilon
A_ : Dict = initializer_factor
A_ : Any = use_cache
A_ : int = eos_token_id
A_ : Tuple = decoder_start_token_id
# for backwards compatibility
A_ : str = dense_act_fn
super().__init__(
pad_token_id=lowercase , eos_token_id=lowercase , decoder_start_token_id=lowercase , tie_word_embeddings=lowercase , is_decoder=lowercase , **lowercase , )
    @classmethod
    def from_pretrained(cls , pretrained_model_name_or_path , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("""model_type""" ) == "pix2struct":
            config_dict = config_dict["""text_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict , **kwargs )
class _lowerCAmelCase ( PretrainedConfig ):
__SCREAMING_SNAKE_CASE : int = 'pix2struct_vision_model'
def __init__(self , lowercase=768 , lowercase=768 , lowercase=2048 , lowercase=64 , lowercase=12 , lowercase=12 , lowercase="gelu_new" , lowercase=1E-6 , lowercase=0.0 , lowercase=0.0 , lowercase=1E-10 , lowercase=1.0 , lowercase=4096 , lowercase=32 , lowercase=128 , **lowercase , ):
super().__init__(**lowercase )
A_ : List[str] = hidden_size
A_ : Optional[int] = patch_embed_hidden_size
A_ : Any = d_ff
A_ : str = dropout_rate
A_ : Dict = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : List[Any] = initializer_range
A_ : List[str] = initializer_factor
A_ : Dict = attention_dropout
A_ : Optional[Any] = layer_norm_eps
A_ : Optional[Any] = dense_act_fn
A_ : List[Any] = seq_len
A_ : Tuple = relative_attention_num_buckets
A_ : Any = relative_attention_max_distance
A_ : int = d_kv
    @classmethod
    def from_pretrained(cls , pretrained_model_name_or_path , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("""model_type""" ) == "pix2struct":
            config_dict = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict , **kwargs )
class _lowerCAmelCase ( PretrainedConfig ):
__SCREAMING_SNAKE_CASE : Any = 'pix2struct'
__SCREAMING_SNAKE_CASE : List[Any] = True
def __init__(self , lowercase=None , lowercase=None , lowercase=1.0 , lowercase=0.02 , lowercase=False , lowercase=False , lowercase=True , **lowercase , ):
super().__init__(tie_word_embeddings=lowercase , is_encoder_decoder=lowercase , **lowercase )
if text_config is None:
A_ : Optional[Any] = {}
logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""" )
if vision_config is None:
A_ : Tuple = {}
logger.info("""vision_config is None. Initializing the Pix2StructVisionConfig with default values.""" )
A_ : Tuple = PixaStructTextConfig(**lowercase )
A_ : List[str] = PixaStructVisionConfig(**lowercase )
A_ : Dict = self.text_config.decoder_start_token_id
A_ : Union[str, Any] = self.text_config.pad_token_id
A_ : str = self.text_config.eos_token_id
A_ : List[str] = initializer_factor
A_ : int = initializer_range
A_ : Tuple = self.initializer_range
A_ : Tuple = self.initializer_range
A_ : List[str] = is_vqa
@classmethod
    def from_text_vision_configs(cls , text_config , vision_config , **kwargs ):
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict(self ):
        output = copy.deepcopy(self.__dict__ )
        output["""text_config"""] = self.text_config.to_dict()
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output | 667 | 0 |
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 231 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
'''configuration_audio_spectrogram_transformer''': [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ASTConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_audio_spectrogram_transformer"""] = [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ASTForAudioClassification''',
'''ASTModel''',
'''ASTPreTrainedModel''',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""feature_extraction_audio_spectrogram_transformer"""] = ['''ASTFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 667 | 0 |
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class UpperCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self , _A = 16 , _A = 88 , _A = None , _A = 1 , _A = 0.0 , _A = 32 , _A = None , _A = False , _A = None , _A = None , _A = "geglu" , _A = None , ):
'''simple docstring'''
super().__init__()
__SCREAMING_SNAKE_CASE = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=_A , attention_head_dim=_A , in_channels=_A , num_layers=_A , dropout=_A , norm_num_groups=_A , cross_attention_dim=_A , attention_bias=_A , sample_size=_A , num_vector_embeds=_A , activation_fn=_A , num_embeds_ada_norm=_A , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
__SCREAMING_SNAKE_CASE = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
__SCREAMING_SNAKE_CASE = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
__SCREAMING_SNAKE_CASE = [1, 0]
def _A ( self , _A , _A , _A=None , _A=None , _A=None , _A = True , ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = hidden_states
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
__SCREAMING_SNAKE_CASE = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
__SCREAMING_SNAKE_CASE = self.transformer_index_for_condition[i]
__SCREAMING_SNAKE_CASE = self.transformers[transformer_index](
_A , encoder_hidden_states=_A , timestep=_A , cross_attention_kwargs=_A , return_dict=_A , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
__SCREAMING_SNAKE_CASE = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
__SCREAMING_SNAKE_CASE = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=_A )
| 148 |
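# The dual-transformer module above interpolates two branch outputs with a
# scalar mix ratio: out = r * enc0 + (1 - r) * enc1, plus the skip connection.
# A standalone numeric sketch of that interpolation:
import torch

def mix_branches(enc0, enc1, skip, mix_ratio=0.5):
    return enc0 * mix_ratio + enc1 * (1 - mix_ratio) + skip

_out = mix_branches(torch.ones(2, 3), torch.zeros(2, 3), torch.zeros(2, 3), mix_ratio=0.25)
assert torch.allclose(_out, torch.full((2, 3), 0.25))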
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowerCAmelCase ( ProcessorMixin ):
__SCREAMING_SNAKE_CASE : List[str] = ['image_processor', 'tokenizer']
__SCREAMING_SNAKE_CASE : Any = 'LayoutLMv3ImageProcessor'
__SCREAMING_SNAKE_CASE : Any = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__(self , lowercase=None , lowercase=None , **lowercase ):
A_ : Dict = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , lowercase , )
A_ : List[str] = kwargs.pop("""feature_extractor""" )
A_ : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(lowercase , lowercase )
def __call__(self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = True , lowercase = False , lowercase = None , lowercase = None , lowercase = 0 , lowercase = None , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = True , lowercase = None , **lowercase , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"""You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.""" )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"""You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" )
# first, apply the image processor
A_ : Optional[int] = self.image_processor(images=lowercase , return_tensors=lowercase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(lowercase , lowercase ):
A_ : Union[str, Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
A_ : Dict = features["""words"""]
A_ : Optional[int] = self.tokenizer(
text=text if text is not None else features["""words"""] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["""boxes"""] , word_labels=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_token_type_ids=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , )
# add pixel values
A_ : List[Any] = features.pop("""pixel_values""" )
if return_overflowing_tokens is True:
A_ : List[str] = self.get_overflowing_images(lowercase , encoded_inputs["""overflow_to_sample_mapping"""] )
A_ : Optional[int] = images
return encoded_inputs
def _a (self , lowercase , lowercase ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
A_ : str = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(lowercase ) != len(lowercase ):
raise ValueError(
"""Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"""
F' {len(lowercase )} and {len(lowercase )}' )
return images_with_overflow
def _a (self , *lowercase , **lowercase ):
return self.tokenizer.batch_decode(*lowercase , **lowercase )
def _a (self , *lowercase , **lowercase ):
return self.tokenizer.decode(*lowercase , **lowercase )
@property
def _a (self ):
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def _a (self ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowercase , )
return self.image_processor_class
@property
def _a (self ):
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , lowercase , )
return self.image_processor | 667 | 0 |
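# The `get_overflowing_images` helper above repeats each image once per
# overflowing token chunk. The same bookkeeping on plain lists:
images = ["img0", "img1"]
overflow_to_sample_mapping = [0, 0, 1]  # sample 0 was split into two chunks
images_with_overflow = [images[i] for i in overflow_to_sample_mapping]
assert images_with_overflow == ["img0", "img0", "img1"]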
def bead_sort(sequence: list) -> list:
    """Bead (gravity) sort; only valid for lists of non-negative integers."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be a list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 204 |
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
lowerCamelCase :Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class _lowerCAmelCase ( ChunkPipeline ):
def __init__(self , **lowercase ):
super().__init__(**lowercase )
requires_backends(self , """vision""" )
requires_backends(self , """torch""" )
if self.framework != "pt":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
self.check_model_type(lowercase )
def _a (self , **lowercase ):
A_ : str = {}
A_ : Dict = {}
A_ : str = {}
# preprocess args
if "points_per_batch" in kwargs:
A_ : Dict = kwargs["""points_per_batch"""]
if "points_per_crop" in kwargs:
A_ : int = kwargs["""points_per_crop"""]
if "crops_n_layers" in kwargs:
A_ : str = kwargs["""crops_n_layers"""]
if "crop_overlap_ratio" in kwargs:
A_ : int = kwargs["""crop_overlap_ratio"""]
if "crop_n_points_downscale_factor" in kwargs:
A_ : Tuple = kwargs["""crop_n_points_downscale_factor"""]
# postprocess args
if "pred_iou_thresh" in kwargs:
A_ : Any = kwargs["""pred_iou_thresh"""]
if "stability_score_offset" in kwargs:
A_ : Optional[int] = kwargs["""stability_score_offset"""]
if "mask_threshold" in kwargs:
A_ : Union[str, Any] = kwargs["""mask_threshold"""]
if "stability_score_thresh" in kwargs:
A_ : List[str] = kwargs["""stability_score_thresh"""]
if "crops_nms_thresh" in kwargs:
A_ : Union[str, Any] = kwargs["""crops_nms_thresh"""]
if "output_rle_mask" in kwargs:
A_ : List[Any] = kwargs["""output_rle_mask"""]
if "output_bboxes_mask" in kwargs:
A_ : Union[str, Any] = kwargs["""output_bboxes_mask"""]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__(self , lowercase , *lowercase , lowercase=None , lowercase=None , **lowercase ):
return super().__call__(lowercase , *lowercase , num_workers=lowercase , batch_size=lowercase , **lowercase )
def _a (self , lowercase , lowercase=64 , lowercase = 0 , lowercase = 512 / 1500 , lowercase = 32 , lowercase = 1 , ):
A_ : Tuple = load_image(lowercase )
A_ : int = self.image_processor.size["""longest_edge"""]
A_, A_, A_, A_ : str = self.image_processor.generate_crop_boxes(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
A_ : Dict = self.image_processor(images=lowercase , return_tensors="""pt""" )
with self.device_placement():
if self.framework == "pt":
A_ : Optional[Any] = self.get_inference_context()
with inference_context():
A_ : str = self._ensure_tensor_on_device(lowercase , device=self.device )
A_ : Tuple = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
A_ : Tuple = image_embeddings
A_ : Dict = grid_points.shape[1]
A_ : Optional[Any] = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"""Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
"""To return all points at once, set points_per_batch to None""" )
for i in range(0 , lowercase , lowercase ):
A_ : Tuple = grid_points[:, i : i + points_per_batch, :, :]
A_ : List[Any] = input_labels[:, i : i + points_per_batch]
A_ : Optional[Any] = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def _a (self , lowercase , lowercase=0.88 , lowercase=0.95 , lowercase=0 , lowercase=1 , ):
A_ : Any = model_inputs.pop("""input_boxes""" )
A_ : str = model_inputs.pop("""is_last""" )
A_ : int = model_inputs.pop("""original_sizes""" ).tolist()
A_ : int = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
A_ : List[str] = self.model(**lowercase )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
A_ : Optional[int] = model_outputs["""pred_masks"""]
A_ : Tuple = self.image_processor.post_process_masks(
lowercase , lowercase , lowercase , lowercase , binarize=lowercase )
A_ : Union[str, Any] = model_outputs["""iou_scores"""]
A_, A_, A_ : Tuple = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , lowercase , lowercase , lowercase , lowercase , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def _a (self , lowercase , lowercase=False , lowercase=False , lowercase=0.7 , ):
A_ : Tuple = []
A_ : Optional[Any] = []
A_ : str = []
for model_output in model_outputs:
all_scores.append(model_output.pop("""iou_scores""" ) )
all_masks.extend(model_output.pop("""masks""" ) )
all_boxes.append(model_output.pop("""boxes""" ) )
A_ : Any = torch.cat(lowercase )
A_ : List[Any] = torch.cat(lowercase )
A_, A_, A_, A_ : Optional[int] = self.image_processor.post_process_for_mask_generation(
lowercase , lowercase , lowercase , lowercase )
A_ : int = defaultdict(lowercase )
for output in model_outputs:
for k, v in output.items():
extra[k].append(lowercase )
A_ : Optional[int] = {}
if output_rle_mask:
A_ : List[str] = rle_mask
if output_bboxes_mask:
A_ : Optional[int] = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra} | 667 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
'''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ResNetForImageClassification''',
'''ResNetModel''',
'''ResNetPreTrainedModel''',
'''ResNetBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
'''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFResNetForImageClassification''',
'''TFResNetModel''',
'''TFResNetPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
'''FlaxResNetForImageClassification''',
'''FlaxResNetModel''',
'''FlaxResNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) | 498 |
from collections.abc import Callable

import numpy as np


def euler_modified(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    """Solve dy/dx = ode_func(x, y) on [x0, x_end] with the modified Euler (Heun) method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # predictor: explicit Euler step
        y_predict = y[k] + step_size * ode_func(x, y[k])
        # corrector: average the slopes at both ends of the interval
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size
    return y
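# Worked example for `euler_modified` above: integrating dy/dx = y from x = 0
# with y(0) = 1; Heun's method should land close to e ≈ 2.71828 at x = 1.
_demo_y = euler_modified(lambda x, y: y, y0=1.0, x0=0.0, step_size=0.01, x_end=1.0)
assert abs(_demo_y[-1] - 2.718281828) < 1e-2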
if __name__ == "__main__":
import doctest
doctest.testmod() | 667 | 0 |
import contextlib
import os
import sqlitea
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    '''simple docstring'''
    assert isinstance(dataset , Dataset )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _UpperCAmelCase ( A , A , A , A ):
'''simple docstring'''
UpperCAmelCase__ =tmp_path / """cache"""
UpperCAmelCase__ ={"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase__ =SqlDatasetReader(
"dataset" , "sqlite:///" + sqlite_path , cache_dir=lowerCamelCase__ , keep_in_memory=lowerCamelCase__ ).read()
_check_sql_dataset(lowerCamelCase__ , lowerCamelCase__ )
@require_sqlalchemy
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def _UpperCAmelCase ( A , A , A , A ):
'''simple docstring'''
UpperCAmelCase__ =tmp_path / """cache"""
UpperCAmelCase__ ={"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
UpperCAmelCase__ =features.copy() if features else default_expected_features
UpperCAmelCase__ =(
Features({feature: Value(lowerCamelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase__ =SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , features=lowerCamelCase__ , cache_dir=lowerCamelCase__ ).read()
_check_sql_dataset(lowerCamelCase__ , lowerCamelCase__ )
def iter_sql_file(sqlite_path):
    '''simple docstring'''
    with contextlib.closing(sqlitea.connect(sqlite_path ) ) as con:
        cur = con.cursor()
cur.execute("SELECT * FROM dataset" )
for row in cur:
yield row
@require_sqlalchemy
def _UpperCAmelCase ( A , A , A ):
'''simple docstring'''
UpperCAmelCase__ =tmp_path / """cache"""
UpperCAmelCase__ =os.path.join(lowerCamelCase__ , "tmp.sql" )
UpperCAmelCase__ =SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , cache_dir=lowerCamelCase__ ).read()
SqlDatasetWriter(lowerCamelCase__ , "dataset" , "sqlite:///" + output_sqlite_path , num_proc=1 ).write()
UpperCAmelCase__ =iter_sql_file(lowerCamelCase__ )
UpperCAmelCase__ =iter_sql_file(lowerCamelCase__ )
for rowa, rowa in zip(lowerCamelCase__ , lowerCamelCase__ ):
assert rowa == rowa
@require_sqlalchemy
def _UpperCAmelCase ( A , A , A ):
'''simple docstring'''
UpperCAmelCase__ =tmp_path / """cache"""
UpperCAmelCase__ =os.path.join(lowerCamelCase__ , "tmp.sql" )
UpperCAmelCase__ =SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , cache_dir=lowerCamelCase__ ).read()
SqlDatasetWriter(lowerCamelCase__ , "dataset" , "sqlite:///" + output_sqlite_path , num_proc=2 ).write()
UpperCAmelCase__ =iter_sql_file(lowerCamelCase__ )
UpperCAmelCase__ =iter_sql_file(lowerCamelCase__ )
for rowa, rowa in zip(lowerCamelCase__ , lowerCamelCase__ ):
assert rowa == rowa
@require_sqlalchemy
def _UpperCAmelCase ( A , A , A ):
'''simple docstring'''
UpperCAmelCase__ =tmp_path / """cache"""
UpperCAmelCase__ =os.path.join(lowerCamelCase__ , "tmp.sql" )
UpperCAmelCase__ =SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , cache_dir=lowerCamelCase__ ).read()
with pytest.raises(lowerCamelCase__ ):
SqlDatasetWriter(lowerCamelCase__ , "dataset" , "sqlite:///" + output_sqlite_path , num_proc=0 ).write()
| 625 |
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilenet_va_config(model_name):
    config = MobileNetVaConfig(layer_norm_eps=0.001 )
    if "_quant" in model_name:
        raise ValueError("""Quantized models are not supported.""" )
    matches = re.match(r"""^mobilenet_v1_([^_]*)_([^_]*)$""" , model_name )
    if matches:
        config.depth_multiplier = float(matches[1] )
        config.image_size = int(matches[2] )
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 10_01
    filename = """imagenet-1k-id2label.json"""
    repo_id = """huggingface/label-files"""
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    idalabel = {int(k ) + 1: v for k, v in idalabel.items()}
    idalabel[0] = """background"""
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub=False ):
    config = get_mobilenet_va_config(model_name )
    # Load 🤗 model
    model = MobileNetVaForImageClassification(config ).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model , config , checkpoint_path )
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={"""width""": config.image_size, """height""": config.image_size} , size={"""shortest_edge""": config.image_size + 32} , )
    encoding = image_processor(images=prepare_img() , return_tensors="""pt""" )
    outputs = model(**encoding )
    logits = outputs.logits
    assert logits.shape == (1, 10_01)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1_739, -1.1_233, 3.1_205] )
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9_440, -2.3_141, -0.3_333] )
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3] , expected_logits , atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("""Pushing to the hub...""" )
        repo_id = """google/""" + model_name
        image_processor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''mobilenet_v1_1.0_224''',
type=str,
help='''Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.''',
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
) | 667 | 0 |
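# Example invocation of the conversion script above (the checkpoint filename
# is an assumption; TF slim releases ship several files sharing this prefix):
#
#   python convert_mobilenet_v1.py \
#       --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_hf \
#       --push_to_hub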
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration

REQUIRE_FAIRSEQ = {'''comet'''}
_has_fairseq = importlib.util.find_spec('fairseq') is not None

UNSUPPORTED_ON_WINDOWS = {'''code_eval'''}
_on_windows = os.name == '''nt'''

REQUIRE_TRANSFORMERS = {'''bertscore''', '''frugalscore''', '''perplexity'''}
_has_transformers = importlib.util.find_spec('transformers') is not None


def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_on_windows_if_not_windows_compatible(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper


def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob('./metrics/*/')]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished


@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows_if_not_windows_compatible )
@local
class LocalMetricTest ( parameterized.TestCase ):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning')
@pytest.mark.filterwarnings('ignore:load_metric is deprecated:FutureWarning')
    def test_load_metric( self , a):
SCREAMING_SNAKE_CASE = """[...]"""
SCREAMING_SNAKE_CASE = importlib.import_module(
datasets.load.metric_module_factory(os.path.join('metrics' , a)).module_path)
SCREAMING_SNAKE_CASE = datasets.load.import_main_class(metric_module.__name__ , dataset=a)
# check parameters
SCREAMING_SNAKE_CASE = inspect.signature(metric._compute).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values())) # no **kwargs
# run doctest
with self.patch_intensive_calls(a , metric_module.__name__):
with self.use_local_metrics():
try:
SCREAMING_SNAKE_CASE = doctest.testmod(a , verbose=a , raise_on_error=a)
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0)
self.assertGreater(results.attempted , 1)
@slow
    def test_load_real_metric( self , a):
SCREAMING_SNAKE_CASE = """[...]"""
SCREAMING_SNAKE_CASE = importlib.import_module(
datasets.load.metric_module_factory(os.path.join('metrics' , a)).module_path)
# run doctest
with self.use_local_metrics():
SCREAMING_SNAKE_CASE = doctest.testmod(a , verbose=a , raise_on_error=a)
self.assertEqual(results.failed , 0)
self.assertGreater(results.attempted , 1)
    @contextmanager
    def patch_intensive_calls(self , metric_name , module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name , *args , **kwargs):
            return load_metric(os.path.join('metrics' , metric_name) , *args , **kwargs)

        with patch('datasets.load_metric') as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls , metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper
@LocalMetricTest.register_intensive_calls_patcher('bleurt')
def patch_bleurt(module_name):
    import tensorflow.compat.va as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string('sv' , '' , '')  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self , input_dict):
            assert len(input_dict['input_ids']) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch('bleurt.score._create_predictor') as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield
@LocalMetricTest.register_intensive_calls_patcher('bertscore')
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model , refs , *args , **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch('bert_score.scorer.get_model'), patch(
        'bert_score.scorer.bert_cos_score_idf') as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield
@LocalMetricTest.register_intensive_calls_patcher('comet')
def lowerCamelCase__ (_UpperCAmelCase):
def load_from_checkpoint(_UpperCAmelCase):
class _snake_case :
def SCREAMING_SNAKE_CASE__ ( self , a , *a , **a) -> Any:
assert len(a) == 2
SCREAMING_SNAKE_CASE = [0.19, 0.92]
return scores, sum(a) / len(a)
return Model()
# mock load_from_checkpoint which is supposed to do download a bert model
# mock load_from_checkpoint which is supposed to do download a bert model
with patch('comet.download_model') as mock_download_model:
        mock_download_model.return_value = None
with patch('comet.load_from_checkpoint') as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
yield
def lowerCamelCase__ ():
SCREAMING_SNAKE_CASE = load_metric(os.path.join('metrics' , 'seqeval'))
SCREAMING_SNAKE_CASE = """ERROR"""
SCREAMING_SNAKE_CASE = F'''Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}'''
with pytest.raises(lowerCamelCase__ , match=re.escape(lowerCamelCase__)):
metric.compute(predictions=[] , references=[] , scheme=lowerCamelCase__)
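# Added companion sketch: the happy path of the same seqeval metric with a valid
# scheme; `scheme` and `mode` are documented seqeval kwargs, labels are made up.
#
#     metric = load_metric(os.path.join('metrics' , 'seqeval'))
#     results = metric.compute(
#         predictions=[["B-PER", "I-PER", "O"]],
#         references=[["B-PER", "I-PER", "O"]],
#         scheme="IOB2",
#         mode="strict",
#     )
#     assert results["overall_f1"] == 1.0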
| 73 |
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
lowerCamelCase :List[str] = logging.get_logger(__name__)
class _lowerCAmelCase ( ProcessorMixin ):
__SCREAMING_SNAKE_CASE : Dict = 'AutoTokenizer'
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['tokenizer']
__SCREAMING_SNAKE_CASE : Tuple = {
'semantic_prompt': 1,
'coarse_prompt': 2,
'fine_prompt': 2,
}
    def __init__(self , tokenizer , speaker_embeddings=None ):
        super().__init__(tokenizer )
        self.speaker_embeddings = speaker_embeddings
@classmethod
def _a (cls , lowercase , lowercase="speaker_embeddings_path.json" , **lowercase ):
if speaker_embeddings_dict_path is not None:
A_ : Any = get_file_from_repo(
lowercase , lowercase , subfolder=kwargs.pop("""subfolder""" , lowercase ) , cache_dir=kwargs.pop("""cache_dir""" , lowercase ) , force_download=kwargs.pop("""force_download""" , lowercase ) , proxies=kwargs.pop("""proxies""" , lowercase ) , resume_download=kwargs.pop("""resume_download""" , lowercase ) , local_files_only=kwargs.pop("""local_files_only""" , lowercase ) , use_auth_token=kwargs.pop("""use_auth_token""" , lowercase ) , revision=kwargs.pop("""revision""" , lowercase ) , )
if speaker_embeddings_path is None:
logger.warning(
                F'`{os.path.join(lowercase , lowercase )}` does not exist, no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' )
A_ : str = None
else:
with open(lowercase ) as speaker_embeddings_json:
A_ : List[str] = json.load(lowercase )
else:
A_ : str = None
A_ : int = AutoTokenizer.from_pretrained(lowercase , **lowercase )
return cls(tokenizer=lowercase , speaker_embeddings=lowercase )
def _a (self , lowercase , lowercase="speaker_embeddings_path.json" , lowercase="speaker_embeddings" , lowercase = False , **lowercase , ):
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(lowercase , lowercase , """v2""" ) , exist_ok=lowercase )
            embeddings_dict = {}
            embeddings_dict["""repo_or_path"""] = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(lowercase )
                    tmp_dict = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["""repo_or_path"""] , lowercase , F'{prompt_key}_{key}' ) , voice_preset[key] , allow_pickle=lowercase , )
                        tmp_dict[key] = os.path.join(lowercase , F'{prompt_key}_{key}.npy' )
                    embeddings_dict[prompt_key] = tmp_dict
with open(os.path.join(lowercase , lowercase ) , """w""" ) as fp:
json.dump(lowercase , lowercase )
super().save_pretrained(lowercase , lowercase , **lowercase )
def _a (self , lowercase = None , **lowercase ):
A_ : List[Any] = self.speaker_embeddings[voice_preset]
        voice_preset_dict = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' )
            path = get_file_from_repo(
self.speaker_embeddings.get("""repo_or_path""" , """/""" ) , voice_preset_paths[key] , subfolder=kwargs.pop("""subfolder""" , lowercase ) , cache_dir=kwargs.pop("""cache_dir""" , lowercase ) , force_download=kwargs.pop("""force_download""" , lowercase ) , proxies=kwargs.pop("""proxies""" , lowercase ) , resume_download=kwargs.pop("""resume_download""" , lowercase ) , local_files_only=kwargs.pop("""local_files_only""" , lowercase ) , use_auth_token=kwargs.pop("""use_auth_token""" , lowercase ) , revision=kwargs.pop("""revision""" , lowercase ) , )
if path is None:
raise ValueError(
                    F'`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exist, no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset} embeddings.' )
            voice_preset_dict[key] = np.load(path )
return voice_preset_dict
def _a (self , lowercase = None ):
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F'Voice preset unrecognized, missing {key} as a key.' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
def __call__(self , lowercase=None , lowercase=None , lowercase="pt" , lowercase=256 , lowercase=False , lowercase=True , lowercase=False , **lowercase , ):
if voice_preset is not None and not isinstance(lowercase , lowercase ):
if (
isinstance(lowercase , lowercase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
A_ : Optional[int] = self._load_voice_preset(lowercase )
else:
if isinstance(lowercase , lowercase ) and not voice_preset.endswith(""".npz""" ):
A_ : Optional[int] = voice_preset + """.npz"""
A_ : Any = np.load(lowercase )
if voice_preset is not None:
self._validate_voice_preset_dict(lowercase , **lowercase )
A_ : Optional[int] = BatchFeature(data=lowercase , tensor_type=lowercase )
A_ : Any = self.tokenizer(
lowercase , return_tensors=lowercase , padding="""max_length""" , max_length=lowercase , return_attention_mask=lowercase , return_token_type_ids=lowercase , add_special_tokens=lowercase , **lowercase , )
if voice_preset is not None:
            encoded_text["""history_prompt"""] = voice_preset
return encoded_text | 667 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __SCREAMING_SNAKE_CASE( PipelineTesterMixin , unittest.TestCase ):
_UpperCAmelCase = KandinskyVaaImgaImgPipeline
_UpperCAmelCase = ['image_embeds', 'negative_image_embeds', 'image']
_UpperCAmelCase = [
'image_embeds',
'negative_image_embeds',
'image',
]
_UpperCAmelCase = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
_UpperCAmelCase = False
@property
def lowerCAmelCase_ ( self: Tuple ) -> List[Any]:
return 32
@property
def lowerCAmelCase_ ( self: Optional[int] ) -> Tuple:
return 32
@property
def lowerCAmelCase_ ( self: Dict ) -> int:
return self.time_input_dim
@property
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Optional[int]:
return self.time_input_dim * 4
@property
def lowerCAmelCase_ ( self: Tuple ) -> str:
return 1_00
@property
def lowerCAmelCase_ ( self: str ) -> int:
torch.manual_seed(0 )
snake_case__ = {
"""in_channels""": 4,
            # out_channels is double in_channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
snake_case__ = UNetaDConditionModel(**UpperCamelCase )
return model
@property
def lowerCAmelCase_ ( self: int ) -> Optional[int]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase_ ( self: Union[str, Any] ) -> List[Any]:
torch.manual_seed(0 )
snake_case__ = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase_ ( self: Any ) -> Any:
snake_case__ = self.dummy_unet
snake_case__ = self.dummy_movq
snake_case__ = {
"""num_train_timesteps""": 10_00,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00_085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
snake_case__ = DDIMScheduler(**UpperCamelCase )
snake_case__ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: Dict , UpperCamelCase: List[str]=0 ) -> int:
snake_case__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase )
snake_case__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
UpperCamelCase )
# create init_image
snake_case__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase )
snake_case__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case__ = Image.fromarray(np.uinta(UpperCamelCase ) ).convert('RGB' ).resize((2_56, 2_56) )
if str(UpperCamelCase ).startswith('mps' ):
snake_case__ = torch.manual_seed(UpperCamelCase )
else:
snake_case__ = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase )
snake_case__ = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def lowerCAmelCase_ ( self: List[Any] ) -> Optional[int]:
snake_case__ = """cpu"""
snake_case__ = self.get_dummy_components()
snake_case__ = self.pipeline_class(**UpperCamelCase )
snake_case__ = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
snake_case__ = pipe(**self.get_dummy_inputs(UpperCamelCase ) )
snake_case__ = output.images
snake_case__ = pipe(
**self.get_dummy_inputs(UpperCamelCase ) , return_dict=UpperCamelCase , )[0]
snake_case__ = image[0, -3:, -3:, -1]
snake_case__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case__ = np.array(
[0.6_199_778, 0.63_984_406, 0.46_145_785, 0.62_944_984, 0.5_622_215, 0.47_306_132, 0.47_441_456, 0.4_607_606, 0.48_719_263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE( unittest.TestCase ):
def lowerCAmelCase_ ( self: str ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self: Optional[Any] ) -> Dict:
snake_case__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_img2img_frog.npy' )
snake_case__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
snake_case__ = """A red cartoon frog, 4k"""
snake_case__ = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(UpperCamelCase )
snake_case__ = KandinskyVaaImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder' , torch_dtype=torch.floataa )
snake_case__ = pipeline.to(UpperCamelCase )
pipeline.set_progress_bar_config(disable=UpperCamelCase )
snake_case__ = torch.Generator(device='cpu' ).manual_seed(0 )
snake_case__ = pipe_prior(
UpperCamelCase , generator=UpperCamelCase , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
snake_case__ = pipeline(
image=UpperCamelCase , image_embeds=UpperCamelCase , negative_image_embeds=UpperCamelCase , generator=UpperCamelCase , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type='np' , )
snake_case__ = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase )
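# Added note: `assert_mean_pixel_difference` used above reduces to a check along
# these lines (a sketch; treat the threshold of 10 as an assumption here):
#
#     def _mean_pixel_difference(image, expected):
#         return np.abs(image.astype(np.float64) - expected.astype(np.float64)).mean()
#
#     assert _mean_pixel_difference(image, expected_image) < 10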
| 328 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : Union[str, Any] = tempfile.mkdtemp()
A_ : List[Any] = BlipImageProcessor()
A_ : Optional[int] = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
A_ : Any = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
A_ : Dict = InstructBlipProcessor(lowercase , lowercase , lowercase )
processor.save_pretrained(self.tmpdirname )
def _a (self , **lowercase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).tokenizer
def _a (self , **lowercase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).image_processor
def _a (self , **lowercase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).qformer_tokenizer
def _a (self ):
shutil.rmtree(self.tmpdirname )
def _a (self ):
A_ : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
A_ : Optional[Any] = [Image.fromarray(np.moveaxis(lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _a (self ):
A_ : str = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
A_ : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A_ : Optional[Any] = self.get_image_processor(do_normalize=lowercase , padding_value=1.0 )
A_ : str = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase )
self.assertIsInstance(processor.qformer_tokenizer , lowercase )
def _a (self ):
A_ : Any = self.get_image_processor()
A_ : Union[str, Any] = self.get_tokenizer()
A_ : List[str] = self.get_qformer_tokenizer()
A_ : int = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : List[Any] = self.prepare_image_inputs()
A_ : Union[str, Any] = image_processor(lowercase , return_tensors="""np""" )
A_ : Dict = processor(images=lowercase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a (self ):
A_ : List[Any] = self.get_image_processor()
A_ : Optional[Any] = self.get_tokenizer()
A_ : Any = self.get_qformer_tokenizer()
A_ : List[str] = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : str = """lower newer"""
A_ : List[Any] = processor(text=lowercase )
A_ : Optional[int] = tokenizer(lowercase , return_token_type_ids=lowercase )
A_ : List[Any] = qformer_tokenizer(lowercase , return_token_type_ids=lowercase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] )
def _a (self ):
A_ : int = self.get_image_processor()
A_ : Union[str, Any] = self.get_tokenizer()
A_ : Union[str, Any] = self.get_qformer_tokenizer()
A_ : Any = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : Optional[int] = """lower newer"""
A_ : Optional[int] = self.prepare_image_inputs()
A_ : Tuple = processor(text=lowercase , images=lowercase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
# test if it raises when no input is passed
with pytest.raises(lowercase ):
processor()
def _a (self ):
A_ : Dict = self.get_image_processor()
A_ : str = self.get_tokenizer()
A_ : Optional[int] = self.get_qformer_tokenizer()
A_ : int = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ : Optional[int] = processor.batch_decode(lowercase )
A_ : Dict = tokenizer.batch_decode(lowercase )
self.assertListEqual(lowercase , lowercase )
def _a (self ):
A_ : Any = self.get_image_processor()
A_ : Dict = self.get_tokenizer()
A_ : Union[str, Any] = self.get_qformer_tokenizer()
A_ : Optional[int] = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : List[Any] = """lower newer"""
A_ : Optional[Any] = self.prepare_image_inputs()
A_ : Any = processor(text=lowercase , images=lowercase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , ) | 667 | 0 |
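# Added sketch: the `qformer_`-prefixed keys asserted above come from the processor
# running both tokenizers and merging the second output under a prefix, roughly:
#
#     text_encoding = tokenizer(text)
#     qformer_encoding = qformer_tokenizer(text)
#     for key, value in qformer_encoding.items():
#         text_encoding["qformer_" + key] = value
#
# (Simplified; the real processor also folds in pixel_values from the image processor.)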
"""simple docstring"""
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
a = logging.get_logger(__name__)
def create_rename_keys(encoder_config , decoder_config ):
    '''simple docstring'''
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm1.weight''', F'''encoder.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.norm1.bias''', F'''encoder.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.weight''', F'''encoder.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.bias''', F'''encoder.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm2.weight''', F'''encoder.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.norm2.bias''', F'''encoder.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.weight''', F'''encoder.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.bias''', F'''encoder.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc2.weight''', F'''encoder.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.mlp.fc2.bias''', F'''encoder.encoder.layer.{i}.output.dense.bias''') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('encoder.deit.cls_token', 'encoder.embeddings.cls_token'),
('encoder.deit.pos_embed', 'encoder.embeddings.position_embeddings'),
('encoder.deit.patch_embed.proj.weight', 'encoder.embeddings.patch_embeddings.projection.weight'),
('encoder.deit.patch_embed.proj.bias', 'encoder.embeddings.patch_embeddings.projection.bias'),
('encoder.deit.norm.weight', 'encoder.layernorm.weight'),
('encoder.deit.norm.bias', 'encoder.layernorm.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict , encoder_config ):
    '''simple docstring'''
    for i in range(encoder_config.num_hidden_layers ):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(F'''encoder.deit.blocks.{i}.attn.qkv.weight''' )
        state_dict[F'''encoder.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[F'''encoder.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[F'''encoder.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
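# Added sanity-check sketch for the slicing above: a fused QKV weight of shape
# (3 * hidden_size, hidden_size) splits into three equal (hidden_size, hidden_size)
# blocks. Toy tensor, hypothetical sizes:
#
#     hidden = 4
#     qkv = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
#     q, k, v = qkv[:hidden, :], qkv[hidden : 2 * hidden, :], qkv[-hidden:, :]
#     assert q.shape == k.shape == v.shape == (hidden, hidden)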
def rename_key(dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def prepare_img(checkpoint_url ):
'''simple docstring'''
if "handwritten" in checkpoint_url:
_A = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg""" # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
_A = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"""
_A = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw ).convert('RGB' )
return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url , pytorch_dump_folder_path ):
'''simple docstring'''
_A = ViTConfig(image_size=3_84 , qkv_bias=lowerCamelCase__ )
_A = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
_A = 7_68
elif "large" in checkpoint_url:
# use ViT-large encoder
_A = 10_24
_A = 40_96
_A = 24
_A = 16
_A = 10_24
else:
raise ValueError('Should either find \'base\' or \'large\' in checkpoint URL' )
    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, with no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
_A = False
_A = """relu"""
_A = 10_24
_A = True
_A = False
_A = False
# load HuggingFace model
_A = ViTModel(lowerCamelCase__ , add_pooling_layer=lowerCamelCase__ )
_A = TrOCRForCausalLM(lowerCamelCase__ )
_A = VisionEncoderDecoderModel(encoder=lowerCamelCase__ , decoder=lowerCamelCase__ )
model.eval()
# load state_dict of original model, rename some keys
_A = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location='cpu' , check_hash=lowerCamelCase__ )["""model"""]
_A = create_rename_keys(lowerCamelCase__ , lowerCamelCase__ )
for src, dest in rename_keys:
rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
read_in_q_k_v(lowerCamelCase__ , lowerCamelCase__ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith('decoder' ) and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val
# load state dict
model.load_state_dict(lowerCamelCase__ )
# Check outputs on an image
_A = ViTImageProcessor(size=encoder_config.image_size )
_A = RobertaTokenizer.from_pretrained('roberta-large' )
_A = TrOCRProcessor(lowerCamelCase__ , lowerCamelCase__ )
_A = processor(images=prepare_img(lowerCamelCase__ ) , return_tensors='pt' ).pixel_values
# verify logits
_A = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
_A = model(pixel_values=lowerCamelCase__ , decoder_input_ids=lowerCamelCase__ )
_A = outputs.logits
_A = torch.Size([1, 1, 5_02_65] )
if "trocr-base-handwritten" in checkpoint_url:
_A = torch.tensor(
[-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] )
elif "trocr-large-handwritten" in checkpoint_url:
_A = torch.tensor(
[-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] )
elif "trocr-base-printed" in checkpoint_url:
_A = torch.tensor(
[-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] )
elif "trocr-large-printed" in checkpoint_url:
_A = torch.tensor(
[-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , lowerCamelCase__ , atol=1E-3 ), "First elements of logits not as expected"
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase__ )
print(F'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(lowerCamelCase__ )
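# Added usage sketch for `rename_key` above, on a toy state dict (keys are made up):
#
#     toy = {"encoder.deit.norm.weight": 1}
#     rename_key(toy, "encoder.deit.norm.weight", "encoder.layernorm.weight")
#     assert "encoder.layernorm.weight" in toy and "encoder.deit.norm.weight" not in toy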
if __name__ == "__main__":
a = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
a = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 7 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase :Optional[Any] = logging.get_logger(__name__)
lowerCamelCase :Tuple = {
'''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class _lowerCAmelCase ( PretrainedConfig ):
__SCREAMING_SNAKE_CASE : List[Any] = 'mgp-str'
    def __init__(self , image_size=[32, 128] , patch_size=4 , num_channels=3 , max_token_length=27 , num_character_labels=38 , num_bpe_labels=50257 , num_wordpiece_labels=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , mlp_ratio=4.0 , qkv_bias=True , distilled=False , layer_norm_eps=1E-5 , drop_rate=0.0 , attn_drop_rate=0.0 , drop_path_rate=0.0 , output_aa_attentions=False , initializer_range=0.02 , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_aa_attentions = output_aa_attentions
        self.initializer_range = initializer_range | 667 | 0 |
"""simple docstring"""
UpperCamelCase__ = range(2, 2_0 + 1)
UpperCamelCase__ = [1_0**k for k in range(ks[-1] + 1)]
UpperCamelCase__ = {}
def next_term(a_i , k , i , n ):
    ds_b = sum(a_i[j] for j in range(k , len(a_i ) ) )
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i ) , k ) ) )
    diff, dn = 0, 0
    max_dn = n - i
    sub_memo = memo.get(ds_b )
if sub_memo is not None:
        jumps = sub_memo.get(c )
if jumps is not None and len(lowerCamelCase__ ) > 0:
# find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps ) - 1 , -1 , -1 ):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
break
if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k , len(a_i ) ) ):
                    new_c, a_i[j] = divmod(new_c , 10 )
                if new_c > 0:
                    add(a_i , k , new_c )
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i , k - 1 , i + dn , n )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i , k , i + dn , n )
diff += _diff
dn += terms_jumped
    jumps = sub_memo[c]
# keep jumps sorted by # of terms skipped
_lowerCAmelCase:int = 0
while j < len(lowerCamelCase__ ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j , (diff, dn, k) )
return (diff, dn)
def compute(a_i , k , i , n ):
if i >= n:
return 0, i
    if k > len(a_i ):
        a_i.extend([0 for _ in range(k - len(a_i ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
    start_i = i
    diff, ds_b, ds_c = 0, 0, 0
    for j in range(len(a_i ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k ):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s , 10 )
            ds_c += a_i[j]
        if addend > 0:
            break
    if addend > 0:
        add(a_i , k , addend )
return diff, i - start_i
def add(digits , k , addend ):
    for j in range(k , len(digits ) ):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s , 10 )
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10
        if addend == 0:
            break
    while addend > 0:
        addend, digit = divmod(addend , 10 )
        digits.append(digit )
def solution(n = 10**15 ):
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits , 20 , i + dn , n )
        dn += terms_jumped
        if dn == n - i:
            break
    a_n = 0
    for j in range(len(digits ) ):
        a_n += digits[j] * 10**j
return a_n
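# Added reference sketch: the memoised machinery above accelerates the digit-sum
# recurrence a(n+1) = a(n) + digitsum(a(n)) with a(1) = 1 (Project Euler 551).
# A brute-force version, far too slow for n = 10**15 but handy for validating small n:
#
#     def solution_naive(n: int) -> int:
#         a = 1
#         for _ in range(n - 1):
#             a += sum(int(d) for d in str(a))
#         return a
#
#     assert solution_naive(10) == 62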
if __name__ == "__main__":
print(F"{solution() = }")
| 227 |
'''simple docstring'''
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float] , x0: float , x1: float ) -> float:
    '''simple docstring'''
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1 ) == function(x_n ):
            raise ZeroDivisionError("""float division by zero, could not find root""" )
        x_n2: float = x_n1 - (
            function(x_n1 ) / ((function(x_n1 ) - function(x_n )) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1 ) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2
def f(x: float ) -> float:
    '''simple docstring'''
    return math.pow(x , 3 ) - (2 * x) - 5
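# Added usage sketch: the same secant iteration on g(x) = x**2 - 2, whose positive
# root is sqrt(2), so the result should land near 1.41421:
#
#     root = intersection(lambda x: x * x - 2, 1.0, 2.0)
#     assert abs(root - 2 ** 0.5) < 1e-4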
if __name__ == "__main__":
print(intersection(f, 3, 3.5)) | 667 | 0 |
'''simple docstring'''
import argparse
import copy
def generate_neighbours(path ):
    '''simple docstring'''
    dict_of_neighbours = {}
    with open(path ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
snake_case_ = []
_list.append([line.split()[1], line.split()[2]] )
                dict_of_neighbours[line.split()[0]] = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
snake_case_ = []
_list.append([line.split()[0], line.split()[2]] )
                dict_of_neighbours[line.split()[1]] = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def generate_first_solution(path , dict_of_neighbours ):
    '''simple docstring'''
    with open(path ) as f:
        start_node = f.read(1 )
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
while visiting not in first_solution:
        minim = 1_0000
        for k in dict_of_neighbours[visiting]:
            if int(k[1] ) < int(minim ) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting )
        distance_of_first_solution = distance_of_first_solution + int(minim )
        visiting = best_node
    first_solution.append(end_node )
snake_case_ = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
snake_case_ = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 1_0000
)
return first_solution, distance_of_first_solution
def find_neighborhood(solution , dict_of_neighbours ):
    '''simple docstring'''
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n )
        for kn in solution[1:-1]:
            idx2 = solution.index(kn )
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution )
            _tmp[idx1] = kn
            _tmp[idx2] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k ) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1] )
            _tmp.append(distance )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0] ) - 1
    neighborhood_of_solution.sort(key=lambda x : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def tabu_search(first_solution , distance_of_first_solution , dict_of_neighbours , iters , size ):
    '''simple docstring'''
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
while count <= iters:
snake_case_ = find_neighborhood(lowerCamelCase__, lowerCamelCase__ )
snake_case_ = 0
snake_case_ = neighborhood[index_of_best_solution]
snake_case_ = len(lowerCamelCase__ ) - 1
snake_case_ = False
while not found:
snake_case_ = 0
while i < len(lowerCamelCase__ ):
if best_solution[i] != solution[i]:
snake_case_ = best_solution[i]
snake_case_ = solution[i]
break
snake_case_ = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
                found = True
snake_case_ = best_solution[:-1]
snake_case_ = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
snake_case_ = cost
snake_case_ = solution
else:
snake_case_ = index_of_best_solution + 1
snake_case_ = neighborhood[index_of_best_solution]
if len(lowerCamelCase__ ) >= size:
tabu_list.pop(0 )
snake_case_ = count + 1
return best_solution_ever, best_cost
def main(args=None ):
    '''simple docstring'''
    dict_of_neighbours = generate_neighbours(args.File )
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File , dict_of_neighbours )
    best_sol, best_cost = tabu_search(
        first_solution , distance_of_first_solution , dict_of_neighbours , args.Iterations , args.Size , )
print(F"Best solution: {best_sol}, with total distance: {best_cost}." )
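# Added sketch: `find_neighborhood` above enumerates 2-swap moves over the tour;
# on a toy tour (hypothetical node labels) a single move looks like this:
#
#     tour = ["a", "b", "c", "d", "a"]
#     swapped = tour[:]
#     swapped[1], swapped[2] = swapped[2], swapped[1]  # exchange nodes "b" and "c"
#     assert swapped == ["a", "c", "b", "d", "a"]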
if __name__ == "__main__":
a : List[str] = argparse.ArgumentParser(description='Tabu Search')
parser.add_argument(
'-f',
'--File',
type=str,
help='Path to the file containing the data',
required=True,
)
parser.add_argument(
'-i',
'--Iterations',
type=int,
help='How many iterations the algorithm should perform',
required=True,
)
parser.add_argument(
'-s', '--Size', type=int, help='Size of the tabu list', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 640 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCamelCase :Tuple = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _lowerCAmelCase ( BaseImageProcessor ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['pixel_values']
def __init__(self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BICUBIC , lowercase = True , lowercase = None , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = None , lowercase = None , lowercase = True , **lowercase , ):
super().__init__(**lowercase )
A_ : Dict = size if size is not None else {"""shortest_edge""": 224}
A_ : List[str] = get_size_dict(lowercase , default_to_square=lowercase )
A_ : Tuple = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
A_ : Union[str, Any] = get_size_dict(lowercase , default_to_square=lowercase , param_name="""crop_size""" )
A_ : str = do_resize
A_ : str = size
A_ : List[str] = resample
A_ : Any = do_center_crop
A_ : Union[str, Any] = crop_size
A_ : List[Any] = do_rescale
A_ : List[Any] = rescale_factor
A_ : Dict = do_normalize
A_ : Optional[int] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A_ : Any = image_std if image_std is not None else OPENAI_CLIP_STD
A_ : Union[str, Any] = do_convert_rgb
def _a (self , lowercase , lowercase , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ):
A_ : Any = get_size_dict(lowercase , default_to_square=lowercase )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A_ : Optional[Any] = get_resize_output_image_size(lowercase , size=size["""shortest_edge"""] , default_to_square=lowercase )
return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ):
A_ : Any = get_size_dict(lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(lowercase , size=(size["""height"""], size["""width"""]) , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ):
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ):
return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ):
A_ : List[str] = do_resize if do_resize is not None else self.do_resize
A_ : int = size if size is not None else self.size
A_ : Optional[int] = get_size_dict(lowercase , param_name="""size""" , default_to_square=lowercase )
A_ : int = resample if resample is not None else self.resample
A_ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
A_ : Any = crop_size if crop_size is not None else self.crop_size
A_ : Dict = get_size_dict(lowercase , param_name="""crop_size""" , default_to_square=lowercase )
A_ : str = do_rescale if do_rescale is not None else self.do_rescale
A_ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
A_ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
A_ : Any = image_mean if image_mean is not None else self.image_mean
A_ : Any = image_std if image_std is not None else self.image_std
A_ : List[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A_ : List[str] = make_list_of_images(lowercase )
if not valid_images(lowercase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A_ : int = [convert_to_rgb(lowercase ) for image in images]
# All transformations expect numpy arrays.
A_ : int = [to_numpy_array(lowercase ) for image in images]
if do_resize:
A_ : int = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images]
if do_center_crop:
A_ : Any = [self.center_crop(image=lowercase , size=lowercase ) for image in images]
if do_rescale:
A_ : List[str] = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
if do_normalize:
A_ : int = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images]
A_ : Any = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
A_ : Dict = {"""pixel_values""": images}
return BatchFeature(data=lowercase , tensor_type=lowercase ) | 667 | 0 |
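# Added usage sketch for the CLIP-style image processor above (assuming the
# preprocess entry point keeps its usual transformers behaviour; sizes follow the
# defaults set in __init__, so a 256x256 input becomes a 224x224 crop):
#
#     image = PIL.Image.fromarray(np.zeros((256, 256, 3), dtype=np.uint8))
#     batch = image_processor.preprocess(image, return_tensors="np")
#     assert batch["pixel_values"].shape == (1, 3, 224, 224)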
import os
def largest_product(grid ):
    n_columns = len(grid[0] )
    n_rows = len(grid )
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0
    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns ):
        for j in range(n_rows - 3 ):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )
            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )
            max_product = max(
                vert_product , horz_product , lr_diag_product , rl_diag_product )
            if max_product > largest:
                largest = max_product
    return largest
def solution():
    grid = []
    with open(os.path.dirname(__file__ ) + "/grid.txt" ) as file:
        for line in file:
            grid.append(line.strip("\n" ).split(" " ) )
    grid = [[int(i ) for i in grid[j]] for j in range(len(grid ) )]
    return largest_product(grid )
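# Added sanity sketch: on a tiny all-ones 4x4 grid every four-in-a-row product is
# 1, so the largest product is 1:
#
#     tiny = [[1] * 4 for _ in range(4)]
#     assert largest_product(tiny) == 1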
if __name__ == "__main__":
print(solution()) | 113 |
'''simple docstring'''
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase , lowercase ):
A_ : List[str] = name
A_ : Dict = value
A_ : Optional[int] = weight
def __repr__(self ):
return F'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'
def _a (self ):
return self.value
def _a (self ):
return self.name
def _a (self ):
return self.weight
def _a (self ):
return self.value / self.weight
def build_menu(name , value , weight ):
    '''simple docstring'''
    menu = []
    for i in range(len(name ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def greedy(items , max_cost , key_func ):
    '''simple docstring'''
    items_copy = sorted(items , key=key_func , reverse=True )
    result = []
    total_value, total_cost = 0.0, 0.0
for i in range(len(lowerCamelCase__ ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
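# Added usage sketch for the helpers above (menu numbers are made up):
#
#     foods = build_menu(["burger", "pizza", "salad"], [80, 100, 30], [40, 60, 10])
#     taken, value = greedy(foods, 70, Things.get_value)
#     assert [t.get_name() for t in taken] == ["pizza", "salad"] and value == 130.0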
def a ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod() | 667 | 0 |
"""simple docstring"""
from collections import deque
def tarjan(g ):
    n = len(g )
    stack = deque()
    on_stack = [False for _ in range(n )]
    index_of = [-1 for _ in range(n )]
    lowlink_of = index_of[:]
    def strong_connect(v , index , components ):
        index_of[v] = index # the number when this node is seen
        lowlink_of[v] = index # lowest rank node reachable from here
        index += 1
        stack.append(v )
        on_stack[v] = True
        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w , index , components )
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w )
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w )
            components.append(component )
        return index
    components = []
    for v in range(n ):
        if index_of[v] == -1:
            strong_connect(v , 0 , components )
    return components
def create_graph(n , edges ):
    g = [[] for _ in range(n )]
    for u, v in edges:
        g[u].append(v )
    return g
if __name__ == "__main__":
# Test
__A : Optional[int] = 7
__A : int = [0, 0, 1, 2, 3, 3, 4, 4, 6]
__A : Any = [1, 3, 2, 0, 1, 4, 5, 6, 5]
__A : Any = [(u, v) for u, v in zip(source, target)]
__A : Union[str, Any] = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 231 |
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
lowerCamelCase :int = logging.getLogger(__name__)
lowerCamelCase :List[Any] = 5_0 # max width of layer names
lowerCamelCase :List[Any] = 7_0 # max width of quantizer names
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = parser.add_argument_group("""quant_trainer arguments""" )
group.add_argument("""--wprec""" , type=lowerCamelCase__ , default=8 , help="""weight precision""" )
group.add_argument("""--aprec""" , type=lowerCamelCase__ , default=8 , help="""activation precision""" )
group.add_argument("""--quant-per-tensor""" , action="""store_true""" , help="""per tensor weight scaling""" )
group.add_argument("""--quant-disable""" , action="""store_true""" , help="""disable all quantizers""" )
group.add_argument("""--quant-disable-embeddings""" , action="""store_true""" , help="""disable all embeddings quantizers""" )
group.add_argument("""--quant-disable-keyword""" , type=lowerCamelCase__ , nargs="""+""" , help="""disable quantizers by keyword""" )
group.add_argument("""--quant-disable-layer-module""" , type=lowerCamelCase__ , help="""disable quantizers by keyword under layer.""" )
group.add_argument("""--quant-enable-layer-module""" , type=lowerCamelCase__ , help="""enable quantizers by keyword under layer""" )
group.add_argument("""--calibrator""" , default="""max""" , help="""which quantization range calibrator to use""" )
group.add_argument("""--percentile""" , default=lowerCamelCase__ , type=lowerCamelCase__ , help="""percentile for PercentileCalibrator""" )
group.add_argument("""--fuse-qkv""" , action="""store_true""" , help="""use the same scale factor for qkv""" )
group.add_argument("""--clip-gelu""" , metavar="""N""" , type=lowerCamelCase__ , help="""clip gelu output maximum value to N""" )
group.add_argument(
"""--recalibrate-weights""" , action="""store_true""" , help=(
"""recalibrate weight amaxes by taking the max of the weights."""
""" amaxes will be computed with the current quantization granularity (axis)."""
) , )
def a ( lowerCamelCase__ ):
'''simple docstring'''
if args.calibrator == "max":
A_ : Union[str, Any] = """max"""
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError("""Specify --percentile when using percentile calibrator""" )
A_ : int = """histogram"""
elif args.calibrator == "mse":
A_ : Dict = """histogram"""
else:
raise ValueError(f'Invalid calibrator {args.calibrator}' )
A_ : int = QuantDescriptor(num_bits=args.aprec , calib_method=lowerCamelCase__ )
A_ : Optional[Any] = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(lowerCamelCase__ )
quant_nn.QuantLinear.set_default_quant_desc_weight(lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False , lowerCamelCase__=False ):
'''simple docstring'''
logger.info("""Configuring Model for Quantization""" )
logger.info(f'using quantization package {pytorch_quantization.__file__}' )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(lowerCamelCase__ , ["""embeddings"""] , which="""weight""" , _disabled=lowerCamelCase__ )
if args.quant_disable:
set_quantizer_by_name(lowerCamelCase__ , [""""""] , _disabled=lowerCamelCase__ )
if args.quant_disable_keyword:
set_quantizer_by_name(lowerCamelCase__ , args.quant_disable_keyword , _disabled=lowerCamelCase__ )
if args.quant_disable_layer_module:
set_quantizer_by_name(lowerCamelCase__ , [r"""layer.\d+.""" + args.quant_disable_layer_module] , _disabled=lowerCamelCase__ )
if args.quant_enable_layer_module:
set_quantizer_by_name(lowerCamelCase__ , [r"""layer.\d+.""" + args.quant_enable_layer_module] , _disabled=lowerCamelCase__ )
if args.recalibrate_weights:
recalibrate_weights(lowerCamelCase__ )
if args.fuse_qkv:
fuse_qkv(lowerCamelCase__ , lowerCamelCase__ )
if args.clip_gelu:
clip_gelu(lowerCamelCase__ , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(lowerCamelCase__ )
def a ( lowerCamelCase__ ):
'''simple docstring'''
logger.info("""Enabling Calibration""" )
for name, module in model.named_modules():
if name.endswith("""_quantizer""" ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(f'{name:80}: {module}' )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
logger.info("""Loading calibrated amax""" )
for name, module in model.named_modules():
if name.endswith("""_quantizer""" ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax("""percentile""" , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
def fusea(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
for mod in [qq, qk, qv]:
if not hasattr(lowerCamelCase__ , """_amax""" ):
print(""" WARNING: NO AMAX BUFFER""" )
return
A_ : List[Any] = qq._amax.detach().item()
A_ : Optional[int] = qk._amax.detach().item()
A_ : Dict = qv._amax.detach().item()
A_ : Any = max(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
qq._amax.fill_(lowerCamelCase__ )
qk._amax.fill_(lowerCamelCase__ )
qv._amax.fill_(lowerCamelCase__ )
logger.info(f' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}' )
for name, mod in model.named_modules():
if name.endswith(""".attention.self""" ):
logger.info(f'FUSE_QKV: {name:{name_width}}' )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if name.endswith(""".output.dense""" ) and not name.endswith("""attention.output.dense""" ):
A_ : Optional[int] = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=lowerCamelCase__ )
A_ : Dict = mod._input_quantizer._amax.data.detach().item()
logger.info(f'CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}' )
def a ( lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(lowerCamelCase__ , """_weight_quantizer""" ) and mod._weight_quantizer.axis is not None:
A_ : Tuple = mod.weight.shape[0]
A_ : Dict = mod._weight_quantizer._amax.detach()
A_ : List[Any] = torch.ones(lowerCamelCase__ , dtype=amax.dtype , device=amax.device ) * amax
print(f'expanding {name} {amax} -> {mod._weight_quantizer._amax}' )
def a ( lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(lowerCamelCase__ , """_weight_quantizer""" ):
            if not hasattr(mod._weight_quantizer , """_amax""" ):
                print(f'RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER' )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
A_ : Dict = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
A_ : Tuple = set(range(len(mod.weight.size() ) ) ) - axis_set
A_ : int = pytorch_quantization.utils.reduce_amax(mod.weight , axis=lowerCamelCase__ , keepdims=lowerCamelCase__ ).detach()
logger.info(f'RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}' )
            mod._weight_quantizer._amax = amax
def a ( lowerCamelCase__ , lowerCamelCase__=25 , lowerCamelCase__=1_80 , lowerCamelCase__=None ):
'''simple docstring'''
if ignore is None:
A_ : int = []
elif not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Union[str, Any] = [ignore]
A_ : Optional[Any] = 0
for name, mod in model.named_modules():
if not hasattr(lowerCamelCase__ , """weight""" ):
continue
A_ : List[str] = max(lowerCamelCase__ , len(lowerCamelCase__ ) )
for name, mod in model.named_modules():
A_ : Tuple = getattr(lowerCamelCase__ , """_input_quantizer""" , lowerCamelCase__ )
A_ : List[Any] = getattr(lowerCamelCase__ , """_weight_quantizer""" , lowerCamelCase__ )
if not hasattr(lowerCamelCase__ , """weight""" ):
continue
if type(lowerCamelCase__ ) in ignore:
continue
if [True for s in ignore if type(lowerCamelCase__ ) is str and s in name]:
continue
A_ : Optional[int] = f'Act:{input_q.extra_repr()}'
A_ : Dict = f'Wgt:{weight_q.extra_repr()}'
A_ : List[Any] = f'{name:{name_width}} {act_str} {wgt_str}'
if len(lowerCamelCase__ ) <= line_width:
logger.info(lowerCamelCase__ )
else:
logger.info(f'{name:{name_width}} {act_str}' )
logger.info(f'{" ":{name_width}} {wgt_str}' )
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : int = 0
for name, mod in model.named_modules():
if isinstance(lowerCamelCase__ , pytorch_quantization.nn.TensorQuantizer ):
print(f'{name:80} {mod}' )
count += 1
print(f'{count} TensorQuantizers found in model' )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = getattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
if quantizer_mod is not None:
assert hasattr(lowerCamelCase__ , lowerCamelCase__ )
setattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
else:
logger.warning(f'{name} has no {quantizer}' )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="both" , **lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = f'Warning: changing {which} quantizers of {name:{qname_width}}'
for k, v in kwargs.items():
s += f' {k}={v}'
if which in ["input", "both"]:
set_quantizer(lowerCamelCase__ , lowerCamelCase__ , """_input_quantizer""" , lowerCamelCase__ , lowerCamelCase__ )
if which in ["weight", "both"]:
set_quantizer(lowerCamelCase__ , lowerCamelCase__ , """_weight_quantizer""" , lowerCamelCase__ , lowerCamelCase__ )
logger.info(lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(lowerCamelCase__ , """_input_quantizer""" ) or hasattr(lowerCamelCase__ , """_weight_quantizer""" ):
for n in names:
if re.search(lowerCamelCase__ , lowerCamelCase__ ):
set_quantizers(lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ )
elif name.endswith("""_quantizer""" ):
for n in names:
if re.search(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Dict = f'Warning: changing {name:{name_width}}'
for k, v in kwargs.items():
s += f' {k}={v}'
setattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
logger.info(lowerCamelCase__ ) | 667 | 0 |
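# --- Illustrative usage sketch (not part of the snippet above) ---
# Assuming a model instrumented with pytorch-quantization TensorQuantizer
# modules, helpers like set_quantizer_by_name are typically driven with a
# regex over module names; the helper below is a hypothetical companion, not
# part of the original script.
import re

def count_matching_quantized_modules(model, pattern):
    """Count modules whose name matches pattern and that carry a quantizer."""
    return sum(
        1
        for name, mod in model.named_modules()
        if re.search(pattern, name)
        and (hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"))
    )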
import math
def malus_law ( initial_intensity , angle ) -> float:
    if initial_intensity < 0:
        raise ValueError('The value of intensity cannot be negative' )
    # handling of negative values of initial intensity
    if angle < 0 or angle > 3_60:
        raise ValueError('In Malus Law, the angle is in the range 0-360 degrees' )
    # handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='''malus_law''')
| 148 |
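# Worked example for malus_law above (illustrative, not in the original file):
# at a 60 degree analyzer angle the transmitted intensity is cos^2(60 deg),
# i.e. one quarter of the incident intensity.
assert abs(malus_law(100.0, 60) - 25.0) < 1e-9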
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : List[Any] = 0
@slow
def _a (self ):
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(lowercase ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
A_ : Tuple = AutoTokenizer.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(lowercase ) , 0 )
    def _a (self ):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER )
        self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 12 )
    def _a (self ):
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER )
        self.assertIsInstance(tokenizer , (RobertaTokenizer, RobertaTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 20 )
    def _a (self ):
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER )
        self.assertIsInstance(config , RobertaConfig )
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER , config=config )
        self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 12 )
def _a (self ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase , """vocab.txt""" ) )
A_ : Optional[Any] = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""bert""" , use_fast=lowercase )
self.assertIsInstance(lowercase , lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase , """merges.txt""" ) )
A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""gpt2""" , use_fast=lowercase )
self.assertIsInstance(lowercase , lowercase )
@require_tokenizers
def _a (self ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase , """vocab.txt""" ) )
A_ : Any = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""bert""" )
self.assertIsInstance(lowercase , lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase , """merges.txt""" ) )
A_ : int = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""gpt2""" )
self.assertIsInstance(lowercase , lowercase )
def _a (self ):
with pytest.raises(lowercase ):
AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" )
@require_tokenizers
def _a (self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
A_ : str = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
if isinstance(lowercase , lowercase ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , lowercase )
else:
self.assertEqual(tokenizer.do_lower_case , lowercase )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def _a (self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
lowercase , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ):
A_ : int = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" )
def _a (self ):
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
A_ : List[str] = TOKENIZER_MAPPING.values()
A_ : Optional[Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(lowercase )
@require_tokenizers
def _a (self ):
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=lowercase ) , lowercase )
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , lowercase )
@require_tokenizers
def _a (self ):
A_ : str = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=lowercase )
A_ : List[Any] = """Hello, world. How are you?"""
A_ : List[Any] = tokenizer.tokenize(lowercase )
self.assertEqual("""[UNK]""" , tokens[0] )
A_ : Dict = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=lowercase )
A_ : List[Any] = tokenizer.tokenize(lowercase )
self.assertEqual("""[UNK]""" , tokens[0] )
@require_tokenizers
def _a (self ):
A_ : Optional[int] = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" )
self.assertEqual(type(lowercase ) , lowercase )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 30000 )
self.assertEqual(tokenizer.unk_token , """[UNK]""" )
self.assertEqual(tokenizer.padding_side , """right""" )
self.assertEqual(tokenizer.truncation_side , """right""" )
def _a (self ):
A_ : Any = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : Tuple = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def _a (self ):
A_ : Union[str, Any] = AutoTokenizer.from_pretrained("""ctrl""" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(lowercase , lowercase )
def _a (self ):
# Check we can load the tokenizer config of an online model.
A_ : Tuple = get_tokenizer_config("""bert-base-cased""" )
A_ : Any = config.pop("""_commit_hash""" , lowercase )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(lowercase , {"""do_lower_case""": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
A_ : List[Any] = get_tokenizer_config(lowercase )
self.assertDictEqual(lowercase , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
A_ : int = AutoTokenizer.from_pretrained(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : Dict = get_tokenizer_config(lowercase )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" )
def _a (self ):
try:
AutoConfig.register("""custom""" , lowercase )
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase ):
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
A_ : Tuple = CustomTokenizer.from_pretrained(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : List[str] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def _a (self ):
try:
AutoConfig.register("""custom""" , lowercase )
# Can register in two steps
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
lowercase , slow_tokenizer_class=lowercase , fast_tokenizer_class=lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase ):
AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
            # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new tokenizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
A_ : str = BertTokenizerFast.from_pretrained(lowercase )
bert_tokenizer.save_pretrained(lowercase )
A_ : Optional[Any] = CustomTokenizerFast.from_pretrained(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : List[str] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase , use_fast=lowercase )
self.assertIsInstance(lowercase , lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _a (self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase ):
A_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase ):
A_ : Any = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
A_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : int = AutoTokenizer.from_pretrained(lowercase , trust_remote_code=lowercase )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
A_ : str = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : Any = AutoTokenizer.from_pretrained(lowercase , trust_remote_code=lowercase , use_fast=lowercase )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
@require_tokenizers
def _a (self ):
        class NewTokenizer ( BertTokenizer ):
            special_attribute_present = False
        class NewTokenizerFast ( BertTokenizerFast ):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False
try:
AutoConfig.register("""custom""" , lowercase )
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
# If remote code is not set, the default is to use local
A_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
A_ : int = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
A_ : int = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
A_ : List[Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
A_ : Any = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertTrue(tokenizer.special_attribute_present )
A_ : Union[str, Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _a (self ):
A_ : Dict = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
A_ : Optional[int] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def _a (self ):
with self.assertRaisesRegex(
lowercase , """bert-base is not a local folder and is not a valid model identifier""" ):
A_ : List[str] = AutoTokenizer.from_pretrained("""bert-base""" )
def _a (self ):
with self.assertRaisesRegex(
lowercase , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
A_ : Tuple = AutoTokenizer.from_pretrained(lowercase , revision="""aaaaaa""" )
def _a (self ):
# Make sure we have cached the tokenizer.
A_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
A_ : Tuple = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 ) | 667 | 0 |
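# Not in the original test file: a standard unittest entry point so that the
# AutoTokenizer suite above can also be executed directly as a script.
if __name__ == "__main__":
    unittest.main()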
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class UpperCAmelCase ( ModelTesterMixin ,UNetTesterMixin ,unittest.TestCase ):
    model_class = VQModel
    main_input_name = 'sample'
    @property
    def dummy_input (self : List[Any] , sizes=(32, 32) ) -> Optional[int]:
        '''simple docstring'''
        batch_size = 4
        num_channels = 3
        image = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
        return {"sample": image}
    @property
    def input_shape (self : int ) -> Union[str, Any]:
        '''simple docstring'''
        return (3, 32, 32)
    @property
    def output_shape (self : int ) -> Any:
        '''simple docstring'''
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common (self : int ) -> Dict:
        '''simple docstring'''
        init_dict = {
            """block_out_channels""": [32, 64],
            """in_channels""": 3,
            """out_channels""": 3,
            """down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
            """up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
            """latent_channels""": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_forward_signature (self : Optional[int] ) -> int:
        '''simple docstring'''
        pass
    def test_training (self : Union[str, Any] ) -> List[str]:
        '''simple docstring'''
        pass
    def test_from_pretrained_hub (self : List[Any] ) -> str:
        '''simple docstring'''
        model , loading_info = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
        model.to(torch_device )
        image = model(**self.dummy_input )
        assert image is not None, "Make sure output is not None"
    def test_output_pretrained (self : Optional[int] ) -> Dict:
        '''simple docstring'''
        model = VQModel.from_pretrained("fusing/vqgan-dummy" )
        model.to(torch_device ).eval()
        torch.manual_seed(0 )
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0 )
        image = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
        image = image.to(torch_device )
        with torch.no_grad():
            output = model(image ).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143] )
        # fmt: on
        self.assertTrue(torch.allclose(output_slice , expected_output_slice , atol=1e-3 ) )
| 204 |
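# A standalone sketch (not part of the test file) of the tiny VQModel
# configuration the tests above exercise; the kwargs mirror the init dict used
# in prepare_init_args_and_inputs_for_common and the shapes follow the dummy inputs.
def build_and_run_tiny_vq_model():
    model = VQModel(
        block_out_channels=[32, 64],
        in_channels=3,
        out_channels=3,
        down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
        up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
        latent_channels=3,
    )
    sample = torch.randn(4, 3, 32, 32)  # (batch, channels, height, width)
    return model(sample).sample  # reconstruction with the same spatial size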
'''simple docstring'''
from __future__ import annotations
def allocation_num ( number_of_bytes , partitions ):
    '''simple docstring'''
    if partitions <= 0:
        raise ValueError("""partitions must be a positive number!""" )
    if partitions > number_of_bytes:
        raise ValueError("""partitions can not > number_of_bytes!""" )
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions ):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
allocation_list.append(f'{start_bytes}-{end_bytes}' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod() | 667 | 0 |
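# Worked example for allocation_num above (illustrative, not in the original
# file): 100 bytes over 4 partitions gives four inclusive, non-overlapping ranges.
assert allocation_num(100, 4) == ["1-25", "26-50", "51-75", "76-100"]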
"""simple docstring"""
class _SCREAMING_SNAKE_CASE: # Public class to implement a graph
    def __init__( self ,row ,col ,graph ) -> List[Any]:
        """simple docstring"""
        self.ROW = row
        self.COL = col
        self.graph = graph
    def is_safe( self ,i ,j ,visited ) -> Optional[Any]:
        """simple docstring"""
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )
    def diffs( self ,i ,j ,visited ) -> Dict:
        """simple docstring"""
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True # Make those cells visited
        for k in range(8 ):
            if self.is_safe(i + row_nbr[k] ,j + col_nbr[k] ,visited ):
                self.diffs(i + row_nbr[k] ,j + col_nbr[k] ,visited )
    def count_islands( self ) -> Dict: # And finally, count all islands.
        """simple docstring"""
        visited = [[False for j in range(self.COL )] for i in range(self.ROW )]
        count = 0
        for i in range(self.ROW ):
            for j in range(self.COL ):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i ,j ,visited )
                    count += 1
        return count | 498 |
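# Illustrative usage (not in the original snippet): the grid below contains
# exactly two islands under 8-connectivity.
island_grid = [
    [1, 1, 0, 0],
    [0, 1, 0, 0],
    [0, 0, 0, 1],
]
assert _SCREAMING_SNAKE_CASE(3, 4, island_grid).count_islands() == 2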
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase :Any = logging.get_logger(__name__)
def create_rename_keys ( encoder_config , decoder_config ):
    '''simple docstring'''
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm1.weight', f'encoder.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm1.bias', f'encoder.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.weight', f'encoder.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.bias', f'encoder.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm2.weight', f'encoder.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm2.bias', f'encoder.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.weight', f'encoder.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.bias', f'encoder.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc2.weight', f'encoder.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.mlp.fc2.bias', f'encoder.encoder.layer.{i}.output.dense.bias') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def read_in_q_k_v ( state_dict , encoder_config ):
    '''simple docstring'''
    for i in range(encoder_config.num_hidden_layers ):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f'encoder.deit.blocks.{i}.attn.qkv.weight' )
        state_dict[f'encoder.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f'encoder.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f'encoder.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key ( dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def prepare_img ( checkpoint_url ):
    '''simple docstring'''
    if "handwritten" in checkpoint_url:
        url = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg""" # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw ).convert("""RGB""" )
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint ( checkpoint_url , pytorch_dump_folder_path ):
    '''simple docstring'''
    encoder_config = ViTConfig(image_size=3_84 , qkv_bias=False )
    decoder_config = TrOCRConfig()
    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 7_68
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 10_24
        encoder_config.intermediate_size = 40_96
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 10_24
    else:
        raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" )
    # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = """relu"""
        decoder_config.max_position_embeddings = 10_24
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False
    # load HuggingFace model
    encoder = ViTModel(encoder_config , add_pooling_layer=False )
    decoder = TrOCRForCausalLM(decoder_config )
    model = VisionEncoderDecoderModel(encoder=encoder , decoder=decoder )
    model.eval()
    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" , check_hash=True )["""model"""]
    rename_keys = create_rename_keys(encoder_config , decoder_config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , encoder_config )
    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]
    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith("""decoder""" ) and "output_projection" not in key:
            state_dict["""decoder.model.""" + key] = val
        else:
            state_dict[key] = val
    # load state dict
    model.load_state_dict(state_dict )
    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size )
    tokenizer = RobertaTokenizer.from_pretrained("""roberta-large""" )
    processor = TrOCRProcessor(image_processor , tokenizer )
    pixel_values = processor(images=prepare_img(checkpoint_url ) , return_tensors="""pt""" ).pixel_values
    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
    outputs = model(pixel_values=pixel_values , decoder_input_ids=decoder_input_ids )
    logits = outputs.logits
    expected_shape = torch.Size([1, 1, 5_02_65] )
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4_502, -4.6_683, -0.5_347, -2.9_291, 9.1_435, -3.0_571, 8.9_764, 1.7_560, 8.7_358, -1.5_311] )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6_437, -1.3_129, -2.2_596, -5.3_455, 6.3_539, 1.7_604, 5.4_991, 1.4_702, 5.6_113, 2.0_170] )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6_816, -5.8_388, 1.1_398, -6.9_034, 6.8_505, -2.4_393, 1.2_284, -1.0_232, -1.9_661, -3.9_210] )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0_162, -7.0_959, 4.4_155, -5.1_063, 7.0_468, -3.1_631, 2.6_466, -0.3_081, -0.8_106, -1.7_535] )
    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10] , expected_slice , atol=1E-3 ), "First elements of logits not as expected"
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'Saving model to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'Saving processor to {pytorch_dump_folder_path}' )
    processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 667 | 0 |
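# Example invocation (the script name and output path are placeholders; the
# flags are exactly the ones registered above):
#
#   python convert_trocr_checkpoint.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#       --pytorch_dump_folder_path ./trocr-base-handwritten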
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''nielsr/canine-s''': 20_48,
}
# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1_11_41_12
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xe_000
SEP = 0xe_001
BOS = 0xe_002
MASK = 0xe_003
RESERVED = 0xe_004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class snake_case_ ( PreTrainedTokenizer ):
    '''simple docstring'''
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self, bos_token=chr(CLS ), eos_token=chr(SEP ), sep_token=chr(SEP ), cls_token=chr(CLS ), pad_token=chr(PAD ), mask_token=chr(MASK ), add_prefix_space=False, model_max_length=2048, **kwargs, ) -> Optional[int]:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False ) if isinstance(bos_token, str ) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False ) if isinstance(eos_token, str ) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False ) if isinstance(sep_token, str ) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False ) if isinstance(cls_token, str ) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False ) if isinstance(pad_token, str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False ) if isinstance(mask_token, str ) else mask_token
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, model_max_length=model_max_length, **kwargs, )
        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints )
    @property
    def vocab_size ( self ) -> Optional[int]:
        return self._unicode_vocab_size
    def _tokenize ( self, text ) -> int:
        return list(text )
    def _convert_token_to_id ( self, token ) -> Tuple:
        try:
            return ord(token )
        except TypeError:
            raise ValueError(f"""invalid token: '{token}'""" )
    def _convert_id_to_token ( self, index ) -> Optional[Any]:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index )
        except TypeError:
            raise ValueError(f"""invalid id: {index}""" )
    def convert_tokens_to_string ( self, tokens ) -> Optional[Any]:
        return "".join(tokens )
    def build_inputs_with_special_tokens ( self, token_ids_a, token_ids_b = None ) -> int:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_a + sep
        if token_ids_b is not None:
            result += token_ids_b + sep
        return result
    def get_special_tokens_mask ( self, token_ids_a, token_ids_b = None, already_has_special_tokens = False ) -> int:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_b, already_has_special_tokens=True )
        result = [1] + ([0] * len(token_ids_a )) + [1]
        if token_ids_b is not None:
            result += ([0] * len(token_ids_b )) + [1]
        return result
    def create_token_type_ids_from_sequences ( self, token_ids_a, token_ids_b = None ) -> Optional[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_a + sep ) * [0]
        if token_ids_b is not None:
            result += len(token_ids_b + sep ) * [1]
        return result
    def save_vocabulary ( self, save_directory, filename_prefix = None ) -> List[Any]:
        return ()
| 625 |
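# A small sketch (not from the original file) of the character-as-codepoint
# scheme the tokenizer above implements: ordinary characters map to ord(char)
# and special symbols live in the Unicode private-use area.
assert [ord(ch) for ch in "hi"] == [104, 105]
assert SPECIAL_CODEPOINTS[CLS] == "[CLS]"
assert SPECIAL_CODEPOINTS_BY_NAME["[SEP]"] == SEP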
'''simple docstring'''
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))''')) | 667 | 0 |
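# How the one-liner above reproduces itself: %r substitutes the repr() of the
# template string into its own %r slot, and the doubled %% collapses back to a
# single %, so the printed text is exactly the program's source.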
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''}
RESOURCE_FILES_NAMES = {
    '''sentencepiece_model_file''': '''sentencepiece.bpe.model''',
    '''vocab_file''': '''vocab.txt''',
}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
        '''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
    },
    '''sentencepiece_model_file''': {
        '''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
        '''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''ernie-m-base''': 5_14,
    '''ernie-m-large''': 5_14,
}
PRETRAINED_INIT_CONFIGURATION = {
    '''ernie-m-base''': {'''do_lower_case''': False},
    '''ernie-m-large''': {'''do_lower_case''': False},
}
class _snake_case ( PreTrainedTokenizer ):
_lowercase : List[str] = ["input_ids"]
_lowercase : str = VOCAB_FILES_NAMES
_lowercase : Dict = PRETRAINED_INIT_CONFIGURATION
_lowercase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : str = PRETRAINED_VOCAB_FILES_MAP
_lowercase : Union[str, Any] = RESOURCE_FILES_NAMES
    def __init__( self , sentencepiece_model_ckpt , vocab_file=None , do_lower_case=False , encoding="utf8" , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , sp_model_kwargs = None , **kwargs , ) -> List[str]:
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , vocab_file=vocab_file , encoding=encoding , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)
        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def get_offset_mapping ( self , text) -> Optional[int]:
        if text is None:
            return None
        split_tokens = self.tokenize(text)
        normalized_text , char_mapping = "", []
        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize('NFKC' , ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))
        text , token_mapping , offset = normalized_text, [], 0
        if self.do_lower_case:
            text = text.lower()
        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)
            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping
return token_mapping
    @property
    def vocab_size ( self) -> List[str]:
        return len(self.vocab)
    def get_vocab ( self) -> Tuple:
        return dict(self.vocab , **self.added_tokens_encoder)
    def __getstate__( self) -> Optional[int]:
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state
    def __setstate__( self , d) -> List[Any]:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)
    def SCREAMING_SNAKE_CASE__ ( self , text) -> Dict:
        return "".join((self.SP_CHAR_MAPPING.get(c , c) for c in text))
    def _tokenize ( self , text , enable_sampling=False , nbest_size=64 , alpha=0.1) -> Optional[Any]:
        if self.sp_model_kwargs.get('enable_sampling') is True:
            enable_sampling = True
        if self.sp_model_kwargs.get('alpha') is not None:
            alpha = self.sp_model_kwargs.get('alpha')
        if self.sp_model_kwargs.get('nbest_size') is not None:
            nbest_size = self.sp_model_kwargs.get('nbest_size')
        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text , nbest_size , alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(piece)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces
    def convert_tokens_to_string ( self , tokens) -> List[str]:
        out_string = """""".join(tokens).replace(SPIECE_UNDERLINE , ' ').strip()
        return out_string
    def convert_ids_to_string ( self , ids) -> List[Any]:
        tokens = self.convert_ids_to_tokens(ids)
        out_string = """""".join(tokens).replace(SPIECE_UNDERLINE , ' ').strip()
        return out_string
    def _convert_token_to_id ( self , token) -> int:
        return self.vocab.get(token , self.vocab.get(self.unk_token))
    def _convert_id_to_token ( self , index) -> List[Any]:
        return self.reverse_vocab.get(index , self.unk_token)
    def build_inputs_with_special_tokens ( self , token_ids_a , token_ids_b=None) -> Dict:
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_a + _sep + _sep + token_ids_b + _sep
    def build_offset_mapping_with_special_tokens ( self , offset_mapping_a , offset_mapping_b=None) -> Optional[Any]:
        if offset_mapping_b is None:
            return [(0, 0)] + offset_mapping_a + [(0, 0)]
        return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_b + [(0, 0)]
    def get_special_tokens_mask ( self , token_ids_a , token_ids_b=None , already_has_special_tokens=False) -> str:
        if already_has_special_tokens:
            if token_ids_b is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.')
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_b is not None:
            return [1] + ([0] * len(token_ids_a)) + [1, 1] + ([0] * len(token_ids_b)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1]
    def create_token_type_ids_from_sequences ( self , token_ids_a , token_ids_b = None) -> Union[str, Any]:
        # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
        if token_ids_b is None:
            # [CLS] X [SEP]
            return (len(token_ids_a) + 2) * [0]
        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_a) + 1) + [1] * (len(token_ids_b) + 3)
    def is_ch_char ( self , char) -> Optional[int]:
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False
    def is_alpha ( self , char) -> Optional[int]:
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False
    def is_punct ( self , char) -> int:
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False
    def is_whitespace ( self , char) -> Dict:
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False
    def load_vocab ( self , filepath) -> Any:
        token_to_idx = {}
        with io.open(filepath , 'r' , encoding='utf-8') as f:
            for index, line in enumerate(f):
                token = line.rstrip('\n')
                token_to_idx[token] = int(index)
        return token_to_idx
    def save_vocabulary ( self , save_directory , filename_prefix = None) -> Dict:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        else:
            vocab_file = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
        with open(vocab_file , 'w' , encoding='utf-8') as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
                        ' Please check that the vocabulary is not corrupted!')
                    index = token_index
                writer.write(token + '\n')
                index += 1
        tokenizer_model_file = os.path.join(save_directory , 'sentencepiece.bpe.model')
        with open(tokenizer_model_file , 'wb') as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
| 73 |
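# Illustrative check (not in the original file) of the pair layout produced by
# create_token_type_ids_from_sequences above: [CLS] A [SEP] [SEP] B [SEP].
seq_a, seq_b = [11, 12], [21]
token_type_ids = [0] * (len(seq_a) + 1) + [1] * (len(seq_b) + 3)
assert token_type_ids == [0, 0, 0, 1, 1, 1, 1]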
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(R'''digital_image_processing/image_data/lena_small.jpg''')
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    '''simple docstring'''
    negative_img = cn.convert_to_negative(img )
    # assert negative_img array for at least one True
    assert negative_img.any()
def test_change_contrast():
    '''simple docstring'''
    with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img , 1_10 ) ).startswith(
            """<PIL.Image.Image image mode=RGB size=100x100 at""" )
def test_gen_gaussian_kernel():
    '''simple docstring'''
    resp = canny.gen_gaussian_kernel(9 , sigma=1.4 )
    # Assert ambiguous array
    assert resp.all()
def test_canny():
    '''simple docstring'''
    canny_img = imread("""digital_image_processing/image_data/lena_small.jpg""" , 0 )
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img )
    # assert canny array for at least one True
    assert canny_array.any()
def test_gen_gaussian_kernel_filter():
    '''simple docstring'''
    assert gg.gaussian_filter(gray , 5 , sigma=0.9 ).all()
def test_convolve_filter():
    '''simple docstring'''
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
    res = conv.img_convolve(gray , laplace ).astype(uinta )
    assert res.any()
def test_median_filter():
    '''simple docstring'''
    assert med.median_filter(gray , 3 ).any()
def test_sobel_filter():
    '''simple docstring'''
    grad, theta = sob.sobel_filter(gray )
    assert grad.any() and theta.any()
def test_sepia():
    '''simple docstring'''
    sepia = sp.make_sepia(img , 20 )
    assert sepia.all()
def test_burkes(file_path = "digital_image_processing/image_data/lena_small.jpg" ):
    '''simple docstring'''
    burkes = bs.Burkes(imread(file_path , 1 ) , 1_20 )
    burkes.process()
    assert burkes.output_img.any()
def test_nearest_neighbour(file_path = "digital_image_processing/image_data/lena_small.jpg" , ):
    '''simple docstring'''
    nn = rs.NearestNeighbour(imread(file_path , 1 ) , 4_00 , 2_00 )
    nn.process()
    assert nn.output.any()
def test_local_binary_pattern():
    '''simple docstring'''
    file_path = """digital_image_processing/image_data/lena.jpg"""
    # Reading the image and converting it to grayscale.
    image = imread(file_path , 0 )
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center_pixel = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image , x_coordinate , y_coordinate , center_pixel )
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]) )
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0 , image.shape[0] ):
        for j in range(0 , image.shape[1] ):
            lbp_image[i][j] = lbp.local_binary_value(image , i , j )
    assert lbp_image.any() | 667 | 0 |
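# These checks are written for pytest; a typical run (the repository-relative
# module path below is an assumption):
#
#   python -m pytest digital_image_processing/test_digital_image_processing.py -q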
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def set_recursively ( hf_pointer , key , value , full_name , weight_type ) -> Union[str, Any]:
    """simple docstring"""
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
        f''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def recursively_load_weights ( fairseq_model , hf_model , is_finetuned ) -> str:
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == 'group' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = """hubert.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
                if key in name or (key.split('w2v_model.' )[-1] == name.split('.' )[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('.' )[-2]
                        mapped_key = mapped_key.replace('*' , layer_index )
                    if "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "weight" in name:
                        weight_type = """weight"""
                    elif "bias" in name:
                        weight_type = """bias"""
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f'''Unused weights: {unused_weights}''' )
def load_conv_layer ( full_name , value , feature_extractor , unused_weights , use_group_norm ) -> Optional[Any]:
    """simple docstring"""
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_hubert_checkpoint ( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ) -> str:
    """simple docstring"""
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path )
    else:
        config = HubertConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.eos_token_id = target_dict.bos_index
            config.pad_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , 'vocab.json' )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            with open(vocab_path , 'w' , encoding='utf-8' ) as vocab_handle:
                json.dump(target_dict.indices , vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == """layer""" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_wavavec = HubertForCTC(config )
    else:
        hf_wavavec = HubertModel(config )
    if is_finetuned:
        model , _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
    else:
        model , _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec , is_finetuned )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
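# Hypothetical invocation sketch (the script name and paths are placeholders,
# not taken from the original file):
#
#   python convert_hubert_checkpoint.py \
#       --checkpoint_path /path/to/hubert_base_ls960.pt \
#       --pytorch_dump_folder_path ./hubert-base \
#       --not_finetuned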
| 328 |
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
lowerCamelCase :Dict = get_logger(__name__)
class _PatchedModuleObj:
    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module
class patch_submodule:
    _active_patches = []

    def __init__(self, obj, target, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__(self):
        *submodules, target_attr = self.target.split(".")
        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)
        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")
    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
return self.__exit__() | 667 | 0 |
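# Minimal usage sketch for the patcher above (the module and helper names are
# illustrative, not from the original file):
#
#   import my_module  # somewhere inside it: `from os.path import join`
#
#   def mock_join(*args):
#       return "/".join(("patched",) + args)
#
#   with patch_submodule(my_module, "os.path.join", mock_join):
#       my_module.build_path("a", "b")  # sees mock_join instead of os.path.join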
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
            assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
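# Standalone sketch of the API under test (the lock path is illustrative):
#
#   lock = FileLock("/tmp/example.lock")
#   with lock.acquire(timeout=1):
#       ...  # critical section; a concurrent acquire() raises Timeout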
| 7 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCamelCase :int = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Optional[int] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :List[Any] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Any = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
lowerCamelCase :Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 667 | 0 |
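# Usage note (sketch): with the lazy structure above, `import transformers`
# stays cheap; the torch-backed modeling module is imported only when one of
# its symbols is first accessed, e.g.:
#
#   from transformers import Wav2Vec2ForCTC  # triggers the modeling import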
"""simple docstring"""
import unittest
from knapsack import knapsack as k
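# Hypothetical reference implementation of the function under test: the plain
# 0/1-knapsack recursion with the (capacity, weights, values, counter)
# signature that the assertions below assume. It is not part of the original
# `knapsack` module and is shown only to make the expected values checkable.
def _reference_knapsack(capacity, weights, values, counter):
    if counter == 0 or capacity == 0:
        return 0
    # item `counter - 1` cannot be taken if it does not fit
    if weights[counter - 1] > capacity:
        return _reference_knapsack(capacity, weights, values, counter - 1)
    # otherwise take the better of including or excluding the item
    return max(
        values[counter - 1]
        + _reference_knapsack(capacity - weights[counter - 1], weights, values, counter - 1),
        _reference_knapsack(capacity, weights, values, counter - 1),
    )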
class a__ ( unittest.TestCase ):
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
unittest.main()
| 227 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline( DiffusionPipeline ):
    def __init__(self, transformer, vae, scheduler, idalabel=None, ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)
        # create an imagenet -> id dictionary for easier use
        self.labels = {}
        if idalabel is not None:
            for key, value in idalabel.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
        self.labels = dict(sorted(self.labels.items()))
    def get_label_ids(self, label):
        if not isinstance(label, list):
            label = list(label)
for l in label:
if l not in self.labels:
raise ValueError(
F'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.' )
return [self.labels[l] for l in label]
@torch.no_grad()
    def __call__(self, class_labels, guidance_scale=4.0, generator=None, num_inference_steps=50, output_type="pil", return_dict=True, ):
        batch_size = len(class_labels)
A_ : Optional[Any] = self.transformer.config.sample_size
A_ : int = self.transformer.config.in_channels
A_ : Optional[int] = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowercase , device=self.device , dtype=self.transformer.dtype , )
A_ : Optional[Any] = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
A_ : Optional[int] = torch.tensor(lowercase , device=self.device ).reshape(-1 )
A_ : Optional[int] = torch.tensor([1000] * batch_size , device=self.device )
A_ : Optional[Any] = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(lowercase )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
A_ : List[Any] = latent_model_input[: len(lowercase ) // 2]
A_ : List[str] = torch.cat([half, half] , dim=0 )
A_ : Any = self.scheduler.scale_model_input(lowercase , lowercase )
A_ : Tuple = t
if not torch.is_tensor(lowercase ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
A_ : Optional[Any] = latent_model_input.device.type == """mps"""
if isinstance(lowercase , lowercase ):
A_ : Optional[Any] = torch.floataa if is_mps else torch.floataa
else:
A_ : List[Any] = torch.intaa if is_mps else torch.intaa
A_ : List[Any] = torch.tensor([timesteps] , dtype=lowercase , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
A_ : List[Any] = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
A_ : int = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
A_ : List[Any] = self.transformer(
lowercase , timestep=lowercase , class_labels=lowercase ).sample
# perform guidance
if guidance_scale > 1:
A_, A_ : Any = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
A_, A_ : List[Any] = torch.split(lowercase , len(lowercase ) // 2 , dim=0 )
A_ : Optional[Any] = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
A_ : str = torch.cat([half_eps, half_eps] , dim=0 )
A_ : Optional[int] = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
A_, A_ : int = torch.split(lowercase , lowercase , dim=1 )
else:
A_ : Optional[int] = noise_pred
# compute previous image: x_t -> x_t-1
A_ : Union[str, Any] = self.scheduler.step(lowercase , lowercase , lowercase ).prev_sample
if guidance_scale > 1:
A_, A_ : int = latent_model_input.chunk(2 , dim=0 )
else:
A_ : Union[str, Any] = latent_model_input
A_ : Union[str, Any] = 1 / self.vae.config.scaling_factor * latents
A_ : List[Any] = self.vae.decode(lowercase ).sample
A_ : List[str] = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A_ : Union[str, Any] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
A_ : int = self.numpy_to_pil(lowercase )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=lowercase ) | 667 | 0 |
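# Minimal usage sketch (assumes this pipeline is exported as `DiTPipeline`, as
# in diffusers; the checkpoint id is illustrative):
#
#   import torch
#   from diffusers import DiTPipeline
#
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16).to("cuda")
#   class_ids = pipe.get_label_ids(["golden retriever"])
#   image = pipe(class_labels=class_ids, num_inference_steps=25).images[0]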
'''simple docstring'''
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
)
_overwrite_items = [
_set('key_a', 'val_a'),
_set('key_a', 'val_b'),
]
_delete_items = [
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
_del('key_a'),
_del('key_b'),
_set('key_a', 'val_a'),
_del('key_a'),
]
_access_absent_items = [
_get('key_a'),
_del('key_a'),
_set('key_a', 'val_a'),
_del('key_a'),
_del('key_a'),
_get('key_a'),
]
_add_with_resize_up = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('key_a', 'val_b'),
]
@pytest.mark.parametrize(
'''operations''', (
pytest.param(_add_items, id='''add items''' ),
pytest.param(_overwrite_items, id='''overwrite items''' ),
pytest.param(_delete_items, id='''delete items''' ),
pytest.param(_access_absent_items, id='''access absent items''' ),
pytest.param(_add_with_resize_up, id='''add with resize up''' ),
pytest.param(_add_with_resize_down, id='''add with resize down''' ),
), )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())
def test_no_new_methods():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}
assert dict_public_names > hash_public_names
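# Quick illustration of the invariant under test (hypothetical session): the
# custom HashMap is expected to mirror builtin dict behaviour for every
# operation list above.
#
#   my = HashMap(initial_block_size=4)
#   my["key_a"] = "val_a"
#   assert my["key_a"] == "val_a"
#   del my["key_a"]  # same observable behaviour as {}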
| 640 |
'''simple docstring'''
import math
lowerCamelCase :int = 1_0
lowerCamelCase :List[Any] = 7
lowerCamelCase :Union[str, Any] = BALLS_PER_COLOUR * NUM_COLOURS
def a ( lowerCamelCase__ = 20 ):
'''simple docstring'''
A_ : Dict = math.comb(lowerCamelCase__ , lowerCamelCase__ )
A_ : Optional[Any] = math.comb(NUM_BALLS - BALLS_PER_COLOUR , lowerCamelCase__ )
A_ : List[str] = NUM_COLOURS * (1 - missing_colour / total)
return f'{result:.9f}'
if __name__ == "__main__":
print(solution(2_0)) | 667 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_lowerCAmelCase : Union[str, Any] ={
'''configuration_audio_spectrogram_transformer''': [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ASTConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : int =[
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ASTForAudioClassification''',
'''ASTModel''',
'''ASTPreTrainedModel''',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Tuple =['''ASTFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
_lowerCAmelCase : Any =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 113 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
lowerCamelCase :Union[str, Any] = {
'''google/pix2struct-textcaps-base''': (
'''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'''
),
}
class PixaStructTextConfig( PretrainedConfig ):
    model_type = 'pix2struct_text_model'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
'hidden_size': 'hidden_size',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
    def __init__(self, vocab_size=50244, hidden_size=768, d_kv=64, d_ff=2048, num_layers=12, num_heads=12, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0, dense_act_fn="gelu_new", decoder_start_token_id=0, use_cache=False, pad_token_id=0, eos_token_id=1, tie_word_embeddings=False, is_decoder=True, **kwargs, ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, tie_word_embeddings=tie_word_embeddings, is_decoder=is_decoder, **kwargs, )
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict, **kwargs)
class PixaStructVisionConfig( PretrainedConfig ):
    model_type = 'pix2struct_vision_model'
    def __init__(self, hidden_size=768, patch_embed_hidden_size=768, d_ff=2048, d_kv=64, num_hidden_layers=12, num_attention_heads=12, dense_act_fn="gelu_new", layer_norm_eps=1e-6, dropout_rate=0.0, attention_dropout=0.0, initializer_range=1e-10, initializer_factor=1.0, seq_len=4096, relative_attention_num_buckets=32, relative_attention_max_distance=128, **kwargs, ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict, **kwargs)
class PixaStructConfig( PretrainedConfig ):
    model_type = 'pix2struct'
    is_composition = True
    def __init__(self, text_config=None, vision_config=None, initializer_factor=1.0, initializer_range=0.02, is_vqa=False, tie_word_embeddings=False, is_encoder_decoder=True, **kwargs, ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")
        self.text_config = PixaStructTextConfig(**text_config)
        self.vision_config = PixaStructVisionConfig(**vision_config)
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range
        self.is_vqa = is_vqa
@classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
return output | 667 | 0 |
"""simple docstring"""
def lucas_lehmer_test(p: int) -> bool:
    if p < 2:
        raise ValueError('''p should not be less than 2!''')
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
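# Worked example (hand-checked): p = 5 gives m = 2**5 - 1 = 31 and the
# s-sequence 4 -> 14 -> 8 -> 0 over p - 2 = 3 iterations, so 31 is reported
# prime. For p = 11, 2**11 - 1 = 2047 = 23 * 89 and the final s is nonzero,
# so the call below correctly prints False.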
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 231 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
lowerCamelCase :Union[str, Any] = {
'''configuration_audio_spectrogram_transformer''': [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ASTConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :int = [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ASTForAudioClassification''',
'''ASTModel''',
'''ASTPreTrainedModel''',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Tuple = ['''ASTFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
lowerCamelCase :Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 667 | 0 |
def bfs(graph, source, t, parent):
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def ford_fulkerson(graph, source, sink):
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float('Inf')
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
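# Expected output: 23 -- the maximum 0 -> 5 flow of this classic CLRS example
# network.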
| 148 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMvaProcessor( ProcessorMixin ):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'LayoutLMv3ImageProcessor'
    tokenizer_class = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text=None, text_pair=None, boxes=None, word_labels=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs, ):
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.")
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True.")
        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"], text_pair=text_pair, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images
return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])
        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}")
return images_with_overflow
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
@property
    def model_input_names(self):
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
    def feature_extractor_class(self):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowercase , )
return self.image_processor_class
@property
    def feature_extractor(self):
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , lowercase , )
return self.image_processor | 667 | 0 |
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(increment: int, total_number: int, level: int, current_list: list[int], total_list: list[list[int]], ) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)
if __name__ == "__main__":
__lowerCamelCase = 4
__lowerCamelCase = 2
__lowerCamelCase = generate_all_combinations(n, k)
print_all_state(total_list)
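# Expected output for n = 4, k = 2 -- the C(4, 2) = 6 combinations, one per
# line: 1 2 / 1 3 / 1 4 / 2 3 / 2 4 / 3 4.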
| 204 |
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
lowerCamelCase :Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(__UpperCAmelCase )
class MaskGenerationPipeline( ChunkPipeline ):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, """vision""")
        requires_backends(self, """torch""")
        if self.framework != "pt":
            raise ValueError(F'The {self.__class__} is only available in PyTorch.')
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)
    def _sanitize_parameters(self, **kwargs):
A_ : str = {}
A_ : Dict = {}
A_ : str = {}
# preprocess args
if "points_per_batch" in kwargs:
A_ : Dict = kwargs["""points_per_batch"""]
if "points_per_crop" in kwargs:
A_ : int = kwargs["""points_per_crop"""]
if "crops_n_layers" in kwargs:
A_ : str = kwargs["""crops_n_layers"""]
if "crop_overlap_ratio" in kwargs:
A_ : int = kwargs["""crop_overlap_ratio"""]
if "crop_n_points_downscale_factor" in kwargs:
A_ : Tuple = kwargs["""crop_n_points_downscale_factor"""]
# postprocess args
if "pred_iou_thresh" in kwargs:
A_ : Any = kwargs["""pred_iou_thresh"""]
if "stability_score_offset" in kwargs:
A_ : Optional[int] = kwargs["""stability_score_offset"""]
if "mask_threshold" in kwargs:
A_ : Union[str, Any] = kwargs["""mask_threshold"""]
if "stability_score_thresh" in kwargs:
A_ : List[str] = kwargs["""stability_score_thresh"""]
if "crops_nms_thresh" in kwargs:
A_ : Union[str, Any] = kwargs["""crops_nms_thresh"""]
if "output_rle_mask" in kwargs:
A_ : List[Any] = kwargs["""output_rle_mask"""]
if "output_bboxes_mask" in kwargs:
A_ : Union[str, Any] = kwargs["""output_bboxes_mask"""]
return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)
    def preprocess(self, image, points_per_batch=64, crops_n_layers=0, crop_overlap_ratio=512 / 1500, points_per_crop=32, crop_n_points_downscale_factor=1, ):
A_ : Tuple = load_image(lowercase )
A_ : int = self.image_processor.size["""longest_edge"""]
A_, A_, A_, A_ : str = self.image_processor.generate_crop_boxes(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
A_ : Dict = self.image_processor(images=lowercase , return_tensors="""pt""" )
with self.device_placement():
if self.framework == "pt":
A_ : Optional[Any] = self.get_inference_context()
with inference_context():
A_ : str = self._ensure_tensor_on_device(lowercase , device=self.device )
A_ : Tuple = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
A_ : Tuple = image_embeddings
A_ : Dict = grid_points.shape[1]
A_ : Optional[Any] = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"""Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
"""To return all points at once, set points_per_batch to None""" )
for i in range(0 , lowercase , lowercase ):
A_ : Tuple = grid_points[:, i : i + points_per_batch, :, :]
A_ : List[Any] = input_labels[:, i : i + points_per_batch]
A_ : Optional[Any] = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
    def _forward(self, model_inputs, pred_iou_thresh=0.88, stability_score_thresh=0.95, mask_threshold=0, stability_score_offset=1, ):
A_ : Any = model_inputs.pop("""input_boxes""" )
A_ : str = model_inputs.pop("""is_last""" )
A_ : int = model_inputs.pop("""original_sizes""" ).tolist()
A_ : int = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
A_ : List[str] = self.model(**lowercase )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
A_ : Optional[int] = model_outputs["""pred_masks"""]
A_ : Tuple = self.image_processor.post_process_masks(
lowercase , lowercase , lowercase , lowercase , binarize=lowercase )
A_ : Union[str, Any] = model_outputs["""iou_scores"""]
A_, A_, A_ : Tuple = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , lowercase , lowercase , lowercase , lowercase , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
    def postprocess(self, model_outputs, output_rle_mask=False, output_bboxes_mask=False, crops_nms_thresh=0.7, ):
A_ : Tuple = []
A_ : Optional[Any] = []
A_ : str = []
for model_output in model_outputs:
all_scores.append(model_output.pop("""iou_scores""" ) )
all_masks.extend(model_output.pop("""masks""" ) )
all_boxes.append(model_output.pop("""boxes""" ) )
A_ : Any = torch.cat(lowercase )
A_ : List[Any] = torch.cat(lowercase )
A_, A_, A_, A_ : Optional[int] = self.image_processor.post_process_for_mask_generation(
lowercase , lowercase , lowercase , lowercase )
A_ : int = defaultdict(lowercase )
for output in model_outputs:
for k, v in output.items():
extra[k].append(lowercase )
A_ : Optional[int] = {}
if output_rle_mask:
A_ : List[str] = rle_mask
if output_bboxes_mask:
A_ : Optional[int] = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra} | 667 | 0 |
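# Minimal usage sketch (assumes this pipeline is registered under the
# "mask-generation" task, as in transformers; the checkpoint id is
# illustrative):
#
#   from transformers import pipeline
#
#   generator = pipeline("mask-generation", model="facebook/sam-vit-base", points_per_batch=64)
#   outputs = generator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   masks, scores = outputs["masks"], outputs["scores"]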
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class _SCREAMING_SNAKE_CASE( unittest.TestCase ):
def _UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Union[str, Any] = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE :List[Any] = BlipImageProcessor()
__SCREAMING_SNAKE_CASE :Optional[int] = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
__SCREAMING_SNAKE_CASE :Any = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
__SCREAMING_SNAKE_CASE :Dict = InstructBlipProcessor(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(self.tmpdirname )
def _UpperCamelCase ( self ,**SCREAMING_SNAKE_CASE__ ) -> Dict:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname ,**SCREAMING_SNAKE_CASE__ ).tokenizer
def _UpperCamelCase ( self ,**SCREAMING_SNAKE_CASE__ ) -> Tuple:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname ,**SCREAMING_SNAKE_CASE__ ).image_processor
def _UpperCamelCase ( self ,**SCREAMING_SNAKE_CASE__ ) -> Dict:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname ,**SCREAMING_SNAKE_CASE__ ).qformer_tokenizer
def _UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :List[str] = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )]
__SCREAMING_SNAKE_CASE :Optional[Any] = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def _UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :str = InstructBlipProcessor(
tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ,qformer_tokenizer=self.get_qformer_tokenizer() ,)
processor.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE :Tuple = self.get_tokenizer(bos_token='''(BOS)''' ,eos_token='''(EOS)''' )
__SCREAMING_SNAKE_CASE :Optional[Any] = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ ,padding_value=1.0 )
__SCREAMING_SNAKE_CASE :str = InstructBlipProcessor.from_pretrained(
self.tmpdirname ,bos_token='''(BOS)''' ,eos_token='''(EOS)''' ,do_normalize=SCREAMING_SNAKE_CASE__ ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,SCREAMING_SNAKE_CASE__ )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(processor.qformer_tokenizer ,SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Any = self.get_image_processor()
__SCREAMING_SNAKE_CASE :Union[str, Any] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE :List[str] = self.get_qformer_tokenizer()
__SCREAMING_SNAKE_CASE :int = InstructBlipProcessor(
tokenizer=SCREAMING_SNAKE_CASE__ ,image_processor=SCREAMING_SNAKE_CASE__ ,qformer_tokenizer=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :List[Any] = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE :Union[str, Any] = image_processor(SCREAMING_SNAKE_CASE__ ,return_tensors='''np''' )
__SCREAMING_SNAKE_CASE :Dict = processor(images=SCREAMING_SNAKE_CASE__ ,return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def _UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :List[Any] = self.get_image_processor()
__SCREAMING_SNAKE_CASE :Optional[Any] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE :Any = self.get_qformer_tokenizer()
__SCREAMING_SNAKE_CASE :List[str] = InstructBlipProcessor(
tokenizer=SCREAMING_SNAKE_CASE__ ,image_processor=SCREAMING_SNAKE_CASE__ ,qformer_tokenizer=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :str = """lower newer"""
__SCREAMING_SNAKE_CASE :List[Any] = processor(text=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Optional[int] = tokenizer(SCREAMING_SNAKE_CASE__ ,return_token_type_ids=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :List[Any] = qformer_tokenizer(SCREAMING_SNAKE_CASE__ ,return_token_type_ids=SCREAMING_SNAKE_CASE__ )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] ,encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] ,encoded_processor['''qformer_''' + key] )
def _UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :int = self.get_image_processor()
__SCREAMING_SNAKE_CASE :Union[str, Any] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE :Union[str, Any] = self.get_qformer_tokenizer()
__SCREAMING_SNAKE_CASE :Any = InstructBlipProcessor(
tokenizer=SCREAMING_SNAKE_CASE__ ,image_processor=SCREAMING_SNAKE_CASE__ ,qformer_tokenizer=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Optional[int] = """lower newer"""
__SCREAMING_SNAKE_CASE :Optional[int] = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE :Tuple = processor(text=SCREAMING_SNAKE_CASE__ ,images=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
list(inputs.keys() ) ,['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] ,)
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
processor()
def _UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Dict = self.get_image_processor()
__SCREAMING_SNAKE_CASE :str = self.get_tokenizer()
__SCREAMING_SNAKE_CASE :Optional[int] = self.get_qformer_tokenizer()
__SCREAMING_SNAKE_CASE :int = InstructBlipProcessor(
tokenizer=SCREAMING_SNAKE_CASE__ ,image_processor=SCREAMING_SNAKE_CASE__ ,qformer_tokenizer=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__SCREAMING_SNAKE_CASE :Optional[int] = processor.batch_decode(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Dict = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Any = self.get_image_processor()
__SCREAMING_SNAKE_CASE :Dict = self.get_tokenizer()
__SCREAMING_SNAKE_CASE :Union[str, Any] = self.get_qformer_tokenizer()
__SCREAMING_SNAKE_CASE :Optional[int] = InstructBlipProcessor(
tokenizer=SCREAMING_SNAKE_CASE__ ,image_processor=SCREAMING_SNAKE_CASE__ ,qformer_tokenizer=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :List[Any] = """lower newer"""
__SCREAMING_SNAKE_CASE :Optional[Any] = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE :Any = processor(text=SCREAMING_SNAKE_CASE__ ,images=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
list(inputs.keys() ) ,['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] ,) | 498 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def heun(ode_func, ya, xa, step_size, x_end):
    """Heun's (improved Euler) method for y' = ode_func(x, y) with y(xa) = ya."""
    n = int(np.ceil((x_end - xa) / step_size))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        # predictor step: explicit Euler estimate of y at x + step_size
        y_pred = y[k] + step_size * ode_func(x, y[k])
        # corrector step: trapezoidal average of the slopes at both ends
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_pred))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod() | 667 | 0 |
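# Worked example (not part of the original module): solve y' = y on [0, 1]
# with y(0) = 1. Heun's method is second-order accurate, so with step 0.01 the
# final value lands very close to e:
#
#   y = heun(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
#   print(y[-1])  # ~2.71828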
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # _fields_ is the specific attribute name expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernelaa.GetStdHandle(-11)
        ctypes.windll.kernelaa.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernelaa.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()
def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernelaa.GetStdHandle(-11)
        ctypes.windll.kernelaa.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernelaa.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()
@contextmanager
def hide():
    """Context manager to hide the terminal cursor."""
try:
hide_cursor()
yield
finally:
show_cursor()
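# Minimal usage sketch: the cursor stays hidden for the duration of the block
# and is restored even if the body raises (the callable is illustrative).
#
#   with hide():
#       render_interactive_menu()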
| 625 |
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase :Union[str, Any] = logging.get_logger(__name__)
def get_mobilenet_va_config(model_name):
    config = MobileNetVaConfig(layer_norm_eps=0.001)
    if "_quant" in model_name:
        raise ValueError("""Quantized models are not supported.""")
    matches = re.match(r"""^mobilenet_v1_([^_]*)_([^_]*)$""", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = """imagenet-1k-id2label.json"""
    repo_id = """huggingface/label-files"""
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r"""))
    idalabel = {int(k) + 1: v for k, v in idalabel.items()}
    idalabel[0] = """background"""
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
return config
def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url, stream=True).raw)
return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original TensorFlow weights into our MobileNetV1 structure.
    """
    config = get_mobilenet_va_config(model_name)
    # Load 🤗 model
    model = MobileNetVaForImageClassification(config).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model, config, checkpoint_path)
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={"""width""": config.image_size, """height""": config.image_size}, size={"""shortest_edge""": config.image_size + 32}, )
    encoding = image_processor(images=prepare_img(), return_tensors="""pt""")
    outputs = model(**encoding)
    logits = outputs.logits
    assert logits.shape == (1, 1001)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model {model_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("""Pushing to the hub...""")
        repo_id = """google/""" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''mobilenet_v1_1.0_224''',
type=str,
help='''Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.''',
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
) | 667 | 0 |
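# Hypothetical invocation sketch (the script name and paths are placeholders):
#
#   python convert_mobilenet_v1_checkpoint.py \
#       --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet-v1-hf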
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput( BaseOutput ):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding( nn.Module ):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 2_56)
    dtype: jnp.dtype = jnp.floataa
    def setup(self) -> None:
SCREAMING_SNAKE_CASE = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
SCREAMING_SNAKE_CASE = []
for i in range(len(self.block_out_channels) - 1):
SCREAMING_SNAKE_CASE = self.block_out_channels[i]
SCREAMING_SNAKE_CASE = self.block_out_channels[i + 1]
SCREAMING_SNAKE_CASE = nn.Conv(
a , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(a)
SCREAMING_SNAKE_CASE = nn.Conv(
a , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(a)
SCREAMING_SNAKE_CASE = blocks
SCREAMING_SNAKE_CASE = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , a) -> Tuple:
SCREAMING_SNAKE_CASE = self.conv_in(a)
SCREAMING_SNAKE_CASE = nn.silu(a)
for block in self.blocks:
SCREAMING_SNAKE_CASE = block(a)
SCREAMING_SNAKE_CASE = nn.silu(a)
SCREAMING_SNAKE_CASE = self.conv_out(a)
return embedding
@flax_register_to_config
class FlaxControlNetModel( nn.Module , FlaxModelMixin , ConfigMixin ):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (3_20, 6_40, 12_80, 12_80)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 12_80
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.floataa
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 2_56)
    def init_weights(self, rng):
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.floataa)
        timesteps = jnp.ones((1,), dtype=jnp.intaa)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.floataa)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.floataa)
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"""params""": params_rng, """dropout""": dropout_rng}
        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
    def setup(self) -> None:
SCREAMING_SNAKE_CASE = self.block_out_channels
SCREAMING_SNAKE_CASE = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
SCREAMING_SNAKE_CASE = self.num_attention_heads or self.attention_head_dim
# input
SCREAMING_SNAKE_CASE = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
SCREAMING_SNAKE_CASE = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift)
SCREAMING_SNAKE_CASE = FlaxTimestepEmbedding(a , dtype=self.dtype)
SCREAMING_SNAKE_CASE = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
SCREAMING_SNAKE_CASE = self.only_cross_attention
if isinstance(a , a):
SCREAMING_SNAKE_CASE = (only_cross_attention,) * len(self.down_block_types)
if isinstance(a , a):
SCREAMING_SNAKE_CASE = (num_attention_heads,) * len(self.down_block_types)
# down
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = block_out_channels[0]
SCREAMING_SNAKE_CASE = nn.Conv(
a , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(a)
for i, down_block_type in enumerate(self.down_block_types):
SCREAMING_SNAKE_CASE = output_channel
SCREAMING_SNAKE_CASE = block_out_channels[i]
SCREAMING_SNAKE_CASE = i == len(a) - 1
if down_block_type == "CrossAttnDownBlock2D":
SCREAMING_SNAKE_CASE = FlaxCrossAttnDownBlockaD(
in_channels=a , out_channels=a , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
SCREAMING_SNAKE_CASE = FlaxDownBlockaD(
in_channels=a , out_channels=a , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(a)
for _ in range(self.layers_per_block):
SCREAMING_SNAKE_CASE = nn.Conv(
a , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(a)
if not is_final_block:
SCREAMING_SNAKE_CASE = nn.Conv(
a , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(a)
SCREAMING_SNAKE_CASE = down_blocks
SCREAMING_SNAKE_CASE = controlnet_down_blocks
# mid
SCREAMING_SNAKE_CASE = block_out_channels[-1]
SCREAMING_SNAKE_CASE = FlaxUNetMidBlockaDCrossAttn(
in_channels=a , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
SCREAMING_SNAKE_CASE = nn.Conv(
a , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , a , a , a , a , a = 1.0 , a = True , a = False , ) -> str:
SCREAMING_SNAKE_CASE = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
SCREAMING_SNAKE_CASE = jnp.flip(a , axis=1)
# 1. time
if not isinstance(a , jnp.ndarray):
SCREAMING_SNAKE_CASE = jnp.array([timesteps] , dtype=jnp.intaa)
elif isinstance(a , jnp.ndarray) and len(timesteps.shape) == 0:
            SCREAMING_SNAKE_CASE = timesteps.astype(dtype=jnp.float32)
SCREAMING_SNAKE_CASE = jnp.expand_dims(a , 0)
SCREAMING_SNAKE_CASE = self.time_proj(a)
SCREAMING_SNAKE_CASE = self.time_embedding(a)
# 2. pre-process
SCREAMING_SNAKE_CASE = jnp.transpose(a , (0, 2, 3, 1))
SCREAMING_SNAKE_CASE = self.conv_in(a)
SCREAMING_SNAKE_CASE = jnp.transpose(a , (0, 2, 3, 1))
SCREAMING_SNAKE_CASE = self.controlnet_cond_embedding(a)
sample += controlnet_cond
# 3. down
SCREAMING_SNAKE_CASE = (sample,)
for down_block in self.down_blocks:
if isinstance(a , a):
SCREAMING_SNAKE_CASE = down_block(a , a , a , deterministic=not train)
else:
SCREAMING_SNAKE_CASE = down_block(a , a , deterministic=not train)
down_block_res_samples += res_samples
# 4. mid
SCREAMING_SNAKE_CASE = self.mid_block(a , a , a , deterministic=not train)
        # 5. controlnet blocks
SCREAMING_SNAKE_CASE = ()
for down_block_res_sample, controlnet_block in zip(a , self.controlnet_down_blocks):
SCREAMING_SNAKE_CASE = controlnet_block(a)
controlnet_down_block_res_samples += (down_block_res_sample,)
SCREAMING_SNAKE_CASE = controlnet_down_block_res_samples
SCREAMING_SNAKE_CASE = self.controlnet_mid_block(a)
# 6. scaling
SCREAMING_SNAKE_CASE = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=a , mid_block_res_sample=a)
| 73 |
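A minimal sketch of the PRNG handling assumed in the `init_weights` method above: `jax.random.split` turns one key into two independent keys, one for parameter initialization and one for dropout. The variable names below are illustrative.

import jax

rng = jax.random.PRNGKey(0)
params_rng, dropout_rng = jax.random.split(rng)  # two independent keys
rngs = {"params": params_rng, "dropout": dropout_rng}  # passed to module.init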
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
lowerCamelCase :List[str] = logging.get_logger(__name__)
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Dict = 'AutoTokenizer'
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['tokenizer']
__SCREAMING_SNAKE_CASE : Tuple = {
'semantic_prompt': 1,
'coarse_prompt': 2,
'fine_prompt': 2,
}
def __init__(self , lowercase , lowercase=None ):
super().__init__(lowercase )
A_ : Any = speaker_embeddings
@classmethod
def _a (cls , lowercase , lowercase="speaker_embeddings_path.json" , **lowercase ):
if speaker_embeddings_dict_path is not None:
A_ : Any = get_file_from_repo(
lowercase , lowercase , subfolder=kwargs.pop("""subfolder""" , lowercase ) , cache_dir=kwargs.pop("""cache_dir""" , lowercase ) , force_download=kwargs.pop("""force_download""" , lowercase ) , proxies=kwargs.pop("""proxies""" , lowercase ) , resume_download=kwargs.pop("""resume_download""" , lowercase ) , local_files_only=kwargs.pop("""local_files_only""" , lowercase ) , use_auth_token=kwargs.pop("""use_auth_token""" , lowercase ) , revision=kwargs.pop("""revision""" , lowercase ) , )
if speaker_embeddings_path is None:
logger.warning(
                    F'`{os.path.join(lowercase , lowercase )}` does not exist\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' )
A_ : str = None
else:
with open(lowercase ) as speaker_embeddings_json:
A_ : List[str] = json.load(lowercase )
else:
A_ : str = None
A_ : int = AutoTokenizer.from_pretrained(lowercase , **lowercase )
return cls(tokenizer=lowercase , speaker_embeddings=lowercase )
def _a (self , lowercase , lowercase="speaker_embeddings_path.json" , lowercase="speaker_embeddings" , lowercase = False , **lowercase , ):
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(lowercase , lowercase , """v2""" ) , exist_ok=lowercase )
A_ : Optional[int] = {}
A_ : Tuple = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
A_ : Union[str, Any] = self._load_voice_preset(lowercase )
A_ : Tuple = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["""repo_or_path"""] , lowercase , F'{prompt_key}_{key}' ) , voice_preset[key] , allow_pickle=lowercase , )
A_ : List[str] = os.path.join(lowercase , F'{prompt_key}_{key}.npy' )
A_ : str = tmp_dict
with open(os.path.join(lowercase , lowercase ) , """w""" ) as fp:
json.dump(lowercase , lowercase )
super().save_pretrained(lowercase , lowercase , **lowercase )
def _a (self , lowercase = None , **lowercase ):
A_ : List[Any] = self.speaker_embeddings[voice_preset]
A_ : Optional[Any] = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' )
A_ : int = get_file_from_repo(
self.speaker_embeddings.get("""repo_or_path""" , """/""" ) , voice_preset_paths[key] , subfolder=kwargs.pop("""subfolder""" , lowercase ) , cache_dir=kwargs.pop("""cache_dir""" , lowercase ) , force_download=kwargs.pop("""force_download""" , lowercase ) , proxies=kwargs.pop("""proxies""" , lowercase ) , resume_download=kwargs.pop("""resume_download""" , lowercase ) , local_files_only=kwargs.pop("""local_files_only""" , lowercase ) , use_auth_token=kwargs.pop("""use_auth_token""" , lowercase ) , revision=kwargs.pop("""revision""" , lowercase ) , )
if path is None:
raise ValueError(
                    F'`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exist\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.' )
A_ : Tuple = np.load(lowercase )
return voice_preset_dict
def _a (self , lowercase = None ):
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F'Voice preset unrecognized, missing {key} as a key.' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
def __call__(self , lowercase=None , lowercase=None , lowercase="pt" , lowercase=256 , lowercase=False , lowercase=True , lowercase=False , **lowercase , ):
if voice_preset is not None and not isinstance(lowercase , lowercase ):
if (
isinstance(lowercase , lowercase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
A_ : Optional[int] = self._load_voice_preset(lowercase )
else:
if isinstance(lowercase , lowercase ) and not voice_preset.endswith(""".npz""" ):
A_ : Optional[int] = voice_preset + """.npz"""
A_ : Any = np.load(lowercase )
if voice_preset is not None:
self._validate_voice_preset_dict(lowercase , **lowercase )
A_ : Optional[int] = BatchFeature(data=lowercase , tensor_type=lowercase )
A_ : Any = self.tokenizer(
lowercase , return_tensors=lowercase , padding="""max_length""" , max_length=lowercase , return_attention_mask=lowercase , return_token_type_ids=lowercase , add_special_tokens=lowercase , **lowercase , )
if voice_preset is not None:
A_ : Union[str, Any] = voice_preset
return encoded_text | 667 | 0 |
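A hedged round-trip sketch for one voice-preset array, mirroring the np.save / np.load calls in the processor above; the directory and file names are illustrative only.

import os
import numpy as np

os.makedirs("speaker_embeddings/v2", exist_ok=True)
semantic_prompt = np.zeros(16, dtype=np.int64)  # stand-in 1D prompt array
np.save("speaker_embeddings/v2/speaker_semantic_prompt.npy", semantic_prompt, allow_pickle=False)
loaded = np.load("speaker_embeddings/v2/speaker_semantic_prompt.npy")
assert np.array_equal(semantic_prompt, loaded)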
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __SCREAMING_SNAKE_CASE( __UpperCAmelCase , unittest.TestCase ):
_UpperCAmelCase = DebertaTokenizer
_UpperCAmelCase = True
_UpperCAmelCase = DebertaTokenizerFast
def lowerCAmelCase_ ( self: Optional[int] ) -> Tuple:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case__ = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""[UNK]""",
]
snake_case__ = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )
snake_case__ = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
snake_case__ = {"""unk_token""": """[UNK]"""}
snake_case__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
snake_case__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(UpperCamelCase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(UpperCamelCase ) )
def lowerCAmelCase_ ( self: List[Any] , **UpperCamelCase: List[Any] ) -> List[str]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase )
def lowerCAmelCase_ ( self: Union[str, Any] , UpperCamelCase: Tuple ) -> List[Any]:
snake_case__ = """lower newer"""
snake_case__ = """lower newer"""
return input_text, output_text
def lowerCAmelCase_ ( self: str ) -> Optional[Any]:
snake_case__ = self.get_tokenizer()
snake_case__ = """lower newer"""
snake_case__ = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
snake_case__ = tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
snake_case__ = tokens + [tokenizer.unk_token]
snake_case__ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , UpperCamelCase )
def lowerCAmelCase_ ( self: Dict ) -> Tuple:
snake_case__ = self.get_tokenizer()
        tokd = tokenizer('Hello' , 'World' )
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd['token_type_ids'] , expected_token_type_ids )
@slow
def lowerCAmelCase_ ( self: int ) -> int:
snake_case__ = self.tokenizer_class.from_pretrained('microsoft/deberta-base' )
snake_case__ = tokenizer.encode('sequence builders' , add_special_tokens=UpperCamelCase )
snake_case__ = tokenizer.encode('multi-sequence build' , add_special_tokens=UpperCamelCase )
snake_case__ = tokenizer.encode(
'sequence builders' , add_special_tokens=UpperCamelCase , add_prefix_space=UpperCamelCase )
snake_case__ = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=UpperCamelCase , add_prefix_space=UpperCamelCase )
snake_case__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase )
snake_case__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase , UpperCamelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def lowerCAmelCase_ ( self: str ) -> Tuple:
snake_case__ = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
snake_case__ = tokenizer_class.from_pretrained('microsoft/deberta-base' )
snake_case__ = [
"""ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
"""ALBERT incorporates two parameter reduction techniques""",
"""The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
""" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
""" vocabulary embedding.""",
]
snake_case__ = tokenizer(UpperCamelCase , padding=UpperCamelCase )
snake_case__ = [tokenizer.decode(UpperCamelCase , skip_special_tokens=UpperCamelCase ) for seq in encoding["""input_ids"""]]
# fmt: off
snake_case__ = {
"""input_ids""": [
[1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2]
],
"""token_type_ids""": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
snake_case__ = [
"""ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
"""ALBERT incorporates two parameter reduction techniques""",
"""The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
""" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
""" vocabulary embedding.""",
]
self.assertDictEqual(encoding.data , UpperCamelCase )
for expected, decoded in zip(UpperCamelCase , UpperCamelCase ):
self.assertEqual(UpperCamelCase , UpperCamelCase )
| 328 |
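As a quick illustration of the toy vocabulary used in the test above: the id mapping is purely positional, so the characters of "lower" map straight to their indices.

vocab_tokens = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n"]
vocab = dict(zip(vocab_tokens, range(len(vocab_tokens))))
print([vocab[c] for c in "lower"])  # [0, 1, 2, 3, 4]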
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : Union[str, Any] = tempfile.mkdtemp()
A_ : List[Any] = BlipImageProcessor()
A_ : Optional[int] = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
A_ : Any = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
A_ : Dict = InstructBlipProcessor(lowercase , lowercase , lowercase )
processor.save_pretrained(self.tmpdirname )
def _a (self , **lowercase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).tokenizer
def _a (self , **lowercase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).image_processor
def _a (self , **lowercase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).qformer_tokenizer
def _a (self ):
shutil.rmtree(self.tmpdirname )
def _a (self ):
        A_ : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
A_ : Optional[Any] = [Image.fromarray(np.moveaxis(lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _a (self ):
A_ : str = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
A_ : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A_ : Optional[Any] = self.get_image_processor(do_normalize=lowercase , padding_value=1.0 )
A_ : str = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase )
self.assertIsInstance(processor.qformer_tokenizer , lowercase )
def _a (self ):
A_ : Any = self.get_image_processor()
A_ : Union[str, Any] = self.get_tokenizer()
A_ : List[str] = self.get_qformer_tokenizer()
A_ : int = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : List[Any] = self.prepare_image_inputs()
A_ : Union[str, Any] = image_processor(lowercase , return_tensors="""np""" )
A_ : Dict = processor(images=lowercase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a (self ):
A_ : List[Any] = self.get_image_processor()
A_ : Optional[Any] = self.get_tokenizer()
A_ : Any = self.get_qformer_tokenizer()
A_ : List[str] = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : str = """lower newer"""
A_ : List[Any] = processor(text=lowercase )
A_ : Optional[int] = tokenizer(lowercase , return_token_type_ids=lowercase )
A_ : List[Any] = qformer_tokenizer(lowercase , return_token_type_ids=lowercase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] )
def _a (self ):
A_ : int = self.get_image_processor()
A_ : Union[str, Any] = self.get_tokenizer()
A_ : Union[str, Any] = self.get_qformer_tokenizer()
A_ : Any = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : Optional[int] = """lower newer"""
A_ : Optional[int] = self.prepare_image_inputs()
A_ : Tuple = processor(text=lowercase , images=lowercase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
# test if it raises when no input is passed
with pytest.raises(lowercase ):
processor()
def _a (self ):
A_ : Dict = self.get_image_processor()
A_ : str = self.get_tokenizer()
A_ : Optional[int] = self.get_qformer_tokenizer()
A_ : int = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ : Optional[int] = processor.batch_decode(lowercase )
A_ : Dict = tokenizer.batch_decode(lowercase )
self.assertListEqual(lowercase , lowercase )
def _a (self ):
A_ : Any = self.get_image_processor()
A_ : Dict = self.get_tokenizer()
A_ : Union[str, Any] = self.get_qformer_tokenizer()
A_ : Optional[int] = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : List[Any] = """lower newer"""
A_ : Optional[Any] = self.prepare_image_inputs()
A_ : Any = processor(text=lowercase , images=lowercase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , ) | 667 | 0 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    '''simple docstring'''
    parser = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'
        ) )
    # Optional arguments for the launch helper
    parser.add_argument('--num_cores' , type=int , default=1 , help='Number of TPU cores to use (1 or 8).' )
    # positional
    parser.add_argument(
        'training_script' , type=str , help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ) , )
    # rest from the training program
    parser.add_argument('training_script_args' , nargs=REMAINDER )
    return parser.parse_args()
def main():
    '''simple docstring'''
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv so the spawned module sees the right arguments.
    sys.argv = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 7 |
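Typical invocation of the spawn helper above; the script name and flags are illustrative only.

# Example (illustrative paths and flags):
#   python xla_spawn.py --num_cores 8 my_training_script.py --learning_rate 3e-5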
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase :Optional[Any] = logging.get_logger(__name__)
lowerCamelCase :Tuple = {
'''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : List[Any] = 'mgp-str'
def __init__(self , lowercase=[32, 128] , lowercase=4 , lowercase=3 , lowercase=27 , lowercase=38 , lowercase=50257 , lowercase=30522 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=4.0 , lowercase=True , lowercase=False , lowercase=1E-5 , lowercase=0.0 , lowercase=0.0 , lowercase=0.0 , lowercase=False , lowercase=0.02 , **lowercase , ):
super().__init__(**lowercase )
A_ : int = image_size
A_ : List[str] = patch_size
A_ : Tuple = num_channels
A_ : List[str] = max_token_length
A_ : int = num_character_labels
A_ : str = num_bpe_labels
A_ : Tuple = num_wordpiece_labels
A_ : Optional[int] = hidden_size
A_ : List[Any] = num_hidden_layers
A_ : int = num_attention_heads
A_ : Tuple = mlp_ratio
A_ : str = distilled
A_ : Union[str, Any] = layer_norm_eps
A_ : str = drop_rate
A_ : int = qkv_bias
A_ : Dict = attn_drop_rate
A_ : List[Any] = drop_path_rate
        A_ : Any = output_a3_attentions
A_ : Union[str, Any] = initializer_range | 667 | 0 |
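A minimal usage sketch, assuming the class above corresponds to `MgpstrConfig` from transformers; the defaults mirror the `alibaba-damo/mgp-str-base` checkpoint.

from transformers import MgpstrConfig

config = MgpstrConfig(max_token_length=27, num_character_labels=38)
print(config.hidden_size)  # 768 by default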
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase__ = {'''configuration_xlnet''': ['''XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLNetConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ['''XLNetTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ['''XLNetTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 227 |
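A self-contained stand-in for the `_LazyModule` pattern used above: the real module is imported only when one of its attributes is first accessed, which keeps importing the package cheap even when heavy backends are installed.

import importlib

class LazyModule:
    def __init__(self, name):
        self._name = name
        self._module = None  # real module is loaded on first attribute access

    def __getattr__(self, attr):
        if self._module is None:
            self._module = importlib.import_module(self._name)
        return getattr(self._module, attr)

json_lazy = LazyModule("json")
print(json_lazy.dumps({"lazy": True}))  # triggers the actual import here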
'''simple docstring'''
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float] , x0: float , x1: float ) -> float:
    '''simple docstring'''
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1 ) == function(x_n ):
            raise ZeroDivisionError("""float division by zero, could not find root""" )
        x_n2: float = x_n1 - (
            function(x_n1 ) / ((function(x_n1 ) - function(x_n )) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1 ) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2
def f(x: float ) -> float:
    '''simple docstring'''
    return math.pow(x , 3 ) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5)) | 667 | 0 |
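A quick sanity check for the secant iteration above (a sketch, not part of the original file): the real root of f(x) = x**3 - 2*x - 5 is approximately 2.0945515, and intersection(f, 3, 3.5) converges to it.

assert abs(intersection(f, 3, 3.5) - 2.0945515) < 10**-4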
'''simple docstring'''
import sys
a : Union[str, Any] = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution(n: str = N ) -> int:
    '''simple docstring'''
    largest_product = -sys.maxsize - 1
    for i in range(len(n ) - 12 ):
        product = 1
        for j in range(13 ):
            product *= int(n[i + j] )
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(f'''{solution() = }''')
| 640 |
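A worked miniature of the same sliding-window scan, using a 3-digit window instead of 13 so the arithmetic is easy to follow.

digits = "23571113"
best = max(
    int(digits[i]) * int(digits[i + 1]) * int(digits[i + 2])
    for i in range(len(digits) - 2)
)
assert best == 105  # 3 * 5 * 7, from the window "357"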
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCamelCase :Tuple = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['pixel_values']
def __init__(self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BICUBIC , lowercase = True , lowercase = None , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = None , lowercase = None , lowercase = True , **lowercase , ):
super().__init__(**lowercase )
A_ : Dict = size if size is not None else {"""shortest_edge""": 224}
A_ : List[str] = get_size_dict(lowercase , default_to_square=lowercase )
A_ : Tuple = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
A_ : Union[str, Any] = get_size_dict(lowercase , default_to_square=lowercase , param_name="""crop_size""" )
A_ : str = do_resize
A_ : str = size
A_ : List[str] = resample
A_ : Any = do_center_crop
A_ : Union[str, Any] = crop_size
A_ : List[Any] = do_rescale
A_ : List[Any] = rescale_factor
A_ : Dict = do_normalize
A_ : Optional[int] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A_ : Any = image_std if image_std is not None else OPENAI_CLIP_STD
A_ : Union[str, Any] = do_convert_rgb
def _a (self , lowercase , lowercase , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ):
A_ : Any = get_size_dict(lowercase , default_to_square=lowercase )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A_ : Optional[Any] = get_resize_output_image_size(lowercase , size=size["""shortest_edge"""] , default_to_square=lowercase )
return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ):
A_ : Any = get_size_dict(lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(lowercase , size=(size["""height"""], size["""width"""]) , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ):
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ):
return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ):
A_ : List[str] = do_resize if do_resize is not None else self.do_resize
A_ : int = size if size is not None else self.size
A_ : Optional[int] = get_size_dict(lowercase , param_name="""size""" , default_to_square=lowercase )
A_ : int = resample if resample is not None else self.resample
A_ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
A_ : Any = crop_size if crop_size is not None else self.crop_size
A_ : Dict = get_size_dict(lowercase , param_name="""crop_size""" , default_to_square=lowercase )
A_ : str = do_rescale if do_rescale is not None else self.do_rescale
A_ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
A_ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
A_ : Any = image_mean if image_mean is not None else self.image_mean
A_ : Any = image_std if image_std is not None else self.image_std
A_ : List[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A_ : List[str] = make_list_of_images(lowercase )
if not valid_images(lowercase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A_ : int = [convert_to_rgb(lowercase ) for image in images]
# All transformations expect numpy arrays.
A_ : int = [to_numpy_array(lowercase ) for image in images]
if do_resize:
A_ : int = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images]
if do_center_crop:
A_ : Any = [self.center_crop(image=lowercase , size=lowercase ) for image in images]
if do_rescale:
A_ : List[str] = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
if do_normalize:
A_ : int = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images]
A_ : Any = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
A_ : Dict = {"""pixel_values""": images}
return BatchFeature(data=lowercase , tensor_type=lowercase ) | 667 | 0 |
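A minimal usage sketch, assuming the processor above corresponds to `CLIPImageProcessor` from transformers; with the defaults shown, a single image is resized and center-cropped to 224x224.

import numpy as np
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor()
image = np.random.randint(0, 256, size=(3, 300, 400), dtype=np.uint8)
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)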
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool" ,usage="accelerate <command> [<args>]" ,allow_abbrev=False )
    subparsers = parser.add_subparsers(help="accelerate command helpers" )
    # Register commands
    get_config_parser(subparsers=subparsers )
    env_command_parser(subparsers=subparsers )
    launch_command_parser(subparsers=subparsers )
    tpu_command_parser(subparsers=subparsers )
    test_command_parser(subparsers=subparsers )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args ,"func" ):
        parser.print_help()
        exit(1 )
    # Run
    args.func(args )
if __name__ == "__main__":
main() | 113 |
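Typical invocations of the CLI assembled above; the subcommand names come from the registered parsers, and the script name and flag are illustrative.

#   accelerate config
#   accelerate env
#   accelerate launch train.py --batch_size 32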
'''simple docstring'''
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase , lowercase ):
A_ : List[str] = name
A_ : Dict = value
A_ : Optional[int] = weight
def __repr__(self ):
return F'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'
def _a (self ):
return self.value
def _a (self ):
return self.name
def _a (self ):
return self.weight
def _a (self ):
return self.value / self.weight
def build_menu(name , value , weight ):
    '''simple docstring'''
    menu = []
    for i in range(len(value ) ):
        menu.append(Things(name[i] , value[i] , weight[i] ) )
    return menu
def greedy(items , max_cost , key_func ):
    '''simple docstring'''
    items_copy = sorted(items , key=key_func , reverse=True )
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy ) ):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i] )
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
def a ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod() | 667 | 0 |
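A usage sketch for the greedy helpers above; the menu values are illustrative.

food = ["Burger", "Pizza", "Coca Cola"]
value = [80, 100, 60]
weight = [40, 10, 20]
menu = build_menu(food, value, weight)
chosen, total_value = greedy(menu, 50, Things.get_value)
print(total_value)  # 180.0: Pizza (weight 10) and Burger (weight 40) fit within max_cost 50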
"""simple docstring"""
import base64
def baseaa_encode(string: str ) -> bytes:
    return base64.b85encode(string.encode('''utf-8''' ) )
def baseaa_decode(encoded: bytes ) -> str:
    return base64.b85decode(encoded ).decode('''utf-8''' )
if __name__ == "__main__":
    test = '''Hello World!'''
    encoded = baseaa_encode(test)
    print(encoded)
    decoded = baseaa_decode(encoded)
    print(decoded)
| 231 |
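The helpers above satisfy a simple round-trip property (a sketch; `base64.b85encode` / `b85decode` are the standard-library Base85 codecs):

assert baseaa_decode(baseaa_encode("transformers")) == "transformers"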
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
lowerCamelCase :int = logging.getLogger(__name__)
lowerCamelCase :List[Any] = 5_0 # max width of layer names
lowerCamelCase :List[Any] = 7_0 # max width of quantizer names
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = parser.add_argument_group("""quant_trainer arguments""" )
group.add_argument("""--wprec""" , type=lowerCamelCase__ , default=8 , help="""weight precision""" )
group.add_argument("""--aprec""" , type=lowerCamelCase__ , default=8 , help="""activation precision""" )
group.add_argument("""--quant-per-tensor""" , action="""store_true""" , help="""per tensor weight scaling""" )
group.add_argument("""--quant-disable""" , action="""store_true""" , help="""disable all quantizers""" )
group.add_argument("""--quant-disable-embeddings""" , action="""store_true""" , help="""disable all embeddings quantizers""" )
group.add_argument("""--quant-disable-keyword""" , type=lowerCamelCase__ , nargs="""+""" , help="""disable quantizers by keyword""" )
group.add_argument("""--quant-disable-layer-module""" , type=lowerCamelCase__ , help="""disable quantizers by keyword under layer.""" )
group.add_argument("""--quant-enable-layer-module""" , type=lowerCamelCase__ , help="""enable quantizers by keyword under layer""" )
group.add_argument("""--calibrator""" , default="""max""" , help="""which quantization range calibrator to use""" )
group.add_argument("""--percentile""" , default=lowerCamelCase__ , type=lowerCamelCase__ , help="""percentile for PercentileCalibrator""" )
group.add_argument("""--fuse-qkv""" , action="""store_true""" , help="""use the same scale factor for qkv""" )
group.add_argument("""--clip-gelu""" , metavar="""N""" , type=lowerCamelCase__ , help="""clip gelu output maximum value to N""" )
group.add_argument(
"""--recalibrate-weights""" , action="""store_true""" , help=(
"""recalibrate weight amaxes by taking the max of the weights."""
""" amaxes will be computed with the current quantization granularity (axis)."""
) , )
def a ( lowerCamelCase__ ):
'''simple docstring'''
if args.calibrator == "max":
A_ : Union[str, Any] = """max"""
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError("""Specify --percentile when using percentile calibrator""" )
A_ : int = """histogram"""
elif args.calibrator == "mse":
A_ : Dict = """histogram"""
else:
raise ValueError(f'Invalid calibrator {args.calibrator}' )
A_ : int = QuantDescriptor(num_bits=args.aprec , calib_method=lowerCamelCase__ )
A_ : Optional[Any] = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(lowerCamelCase__ )
quant_nn.QuantLinear.set_default_quant_desc_weight(lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False , lowerCamelCase__=False ):
'''simple docstring'''
logger.info("""Configuring Model for Quantization""" )
logger.info(f'using quantization package {pytorch_quantization.__file__}' )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(lowerCamelCase__ , ["""embeddings"""] , which="""weight""" , _disabled=lowerCamelCase__ )
if args.quant_disable:
set_quantizer_by_name(lowerCamelCase__ , [""""""] , _disabled=lowerCamelCase__ )
if args.quant_disable_keyword:
set_quantizer_by_name(lowerCamelCase__ , args.quant_disable_keyword , _disabled=lowerCamelCase__ )
if args.quant_disable_layer_module:
set_quantizer_by_name(lowerCamelCase__ , [r"""layer.\d+.""" + args.quant_disable_layer_module] , _disabled=lowerCamelCase__ )
if args.quant_enable_layer_module:
set_quantizer_by_name(lowerCamelCase__ , [r"""layer.\d+.""" + args.quant_enable_layer_module] , _disabled=lowerCamelCase__ )
if args.recalibrate_weights:
recalibrate_weights(lowerCamelCase__ )
if args.fuse_qkv:
fuse_qkv(lowerCamelCase__ , lowerCamelCase__ )
if args.clip_gelu:
clip_gelu(lowerCamelCase__ , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(lowerCamelCase__ )
def a ( lowerCamelCase__ ):
'''simple docstring'''
logger.info("""Enabling Calibration""" )
for name, module in model.named_modules():
if name.endswith("""_quantizer""" ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(f'{name:80}: {module}' )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
logger.info("""Loading calibrated amax""" )
for name, module in model.named_modules():
if name.endswith("""_quantizer""" ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax("""percentile""" , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
def fusea(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
for mod in [qq, qk, qv]:
if not hasattr(lowerCamelCase__ , """_amax""" ):
print(""" WARNING: NO AMAX BUFFER""" )
return
A_ : List[Any] = qq._amax.detach().item()
A_ : Optional[int] = qk._amax.detach().item()
A_ : Dict = qv._amax.detach().item()
A_ : Any = max(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
qq._amax.fill_(lowerCamelCase__ )
qk._amax.fill_(lowerCamelCase__ )
qv._amax.fill_(lowerCamelCase__ )
logger.info(f' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}' )
for name, mod in model.named_modules():
if name.endswith(""".attention.self""" ):
logger.info(f'FUSE_QKV: {name:{name_width}}' )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if name.endswith(""".output.dense""" ) and not name.endswith("""attention.output.dense""" ):
A_ : Optional[int] = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=lowerCamelCase__ )
A_ : Dict = mod._input_quantizer._amax.data.detach().item()
logger.info(f'CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}' )
def a ( lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(lowerCamelCase__ , """_weight_quantizer""" ) and mod._weight_quantizer.axis is not None:
A_ : Tuple = mod.weight.shape[0]
A_ : Dict = mod._weight_quantizer._amax.detach()
A_ : List[Any] = torch.ones(lowerCamelCase__ , dtype=amax.dtype , device=amax.device ) * amax
print(f'expanding {name} {amax} -> {mod._weight_quantizer._amax}' )
def a ( lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(lowerCamelCase__ , """_weight_quantizer""" ):
if not hasattr(mod.weight_quantizer , """_amax""" ):
            print(f'RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER' )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
A_ : Dict = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
A_ : Tuple = set(range(len(mod.weight.size() ) ) ) - axis_set
A_ : int = pytorch_quantization.utils.reduce_amax(mod.weight , axis=lowerCamelCase__ , keepdims=lowerCamelCase__ ).detach()
logger.info(f'RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}' )
A_ : str = amax
def a ( lowerCamelCase__ , lowerCamelCase__=25 , lowerCamelCase__=1_80 , lowerCamelCase__=None ):
'''simple docstring'''
if ignore is None:
A_ : int = []
elif not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Union[str, Any] = [ignore]
A_ : Optional[Any] = 0
for name, mod in model.named_modules():
if not hasattr(lowerCamelCase__ , """weight""" ):
continue
A_ : List[str] = max(lowerCamelCase__ , len(lowerCamelCase__ ) )
for name, mod in model.named_modules():
A_ : Tuple = getattr(lowerCamelCase__ , """_input_quantizer""" , lowerCamelCase__ )
A_ : List[Any] = getattr(lowerCamelCase__ , """_weight_quantizer""" , lowerCamelCase__ )
if not hasattr(lowerCamelCase__ , """weight""" ):
continue
if type(lowerCamelCase__ ) in ignore:
continue
if [True for s in ignore if type(lowerCamelCase__ ) is str and s in name]:
continue
A_ : Optional[int] = f'Act:{input_q.extra_repr()}'
A_ : Dict = f'Wgt:{weight_q.extra_repr()}'
A_ : List[Any] = f'{name:{name_width}} {act_str} {wgt_str}'
if len(lowerCamelCase__ ) <= line_width:
logger.info(lowerCamelCase__ )
else:
logger.info(f'{name:{name_width}} {act_str}' )
logger.info(f'{" ":{name_width}} {wgt_str}' )
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : int = 0
for name, mod in model.named_modules():
if isinstance(lowerCamelCase__ , pytorch_quantization.nn.TensorQuantizer ):
print(f'{name:80} {mod}' )
count += 1
print(f'{count} TensorQuantizers found in model' )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = getattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
if quantizer_mod is not None:
assert hasattr(lowerCamelCase__ , lowerCamelCase__ )
setattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
else:
logger.warning(f'{name} has no {quantizer}' )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="both" , **lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = f'Warning: changing {which} quantizers of {name:{qname_width}}'
for k, v in kwargs.items():
s += f' {k}={v}'
if which in ["input", "both"]:
set_quantizer(lowerCamelCase__ , lowerCamelCase__ , """_input_quantizer""" , lowerCamelCase__ , lowerCamelCase__ )
if which in ["weight", "both"]:
set_quantizer(lowerCamelCase__ , lowerCamelCase__ , """_weight_quantizer""" , lowerCamelCase__ , lowerCamelCase__ )
logger.info(lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(lowerCamelCase__ , """_input_quantizer""" ) or hasattr(lowerCamelCase__ , """_weight_quantizer""" ):
for n in names:
if re.search(lowerCamelCase__ , lowerCamelCase__ ):
set_quantizers(lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ )
elif name.endswith("""_quantizer""" ):
for n in names:
if re.search(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Dict = f'Warning: changing {name:{name_width}}'
for k, v in kwargs.items():
s += f' {k}={v}'
setattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
logger.info(lowerCamelCase__ ) | 667 | 0 |
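A small sketch of the regex-based name matching that the quantizer helpers above rely on: module names are matched with `re.search`, so a pattern like `layer.\d+` selects every encoder layer.

import re

names = ["bert.encoder.layer.0.attention.self.query", "bert.pooler.dense"]
pattern = r"layer.\d+"
print([n for n in names if re.search(pattern, n)])  # only the encoder layer matches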
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ : Dict ={'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Union[str, Any] =[
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ : Optional[int] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 148 |
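A minimal illustration of the `TYPE_CHECKING` guard used in the lazy modules above: the constant is False at runtime, so guarded imports exist only for static type checkers and are never executed.

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from decimal import Decimal  # seen by type checkers only

def as_str(value: "Decimal") -> str:
    return str(value)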
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : List[Any] = 0
@slow
def _a (self ):
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(lowercase ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
A_ : Tuple = AutoTokenizer.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(lowercase ) , 0 )
def _a (self ):
A_ : str = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def _a (self ):
A_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def _a (self ):
A_ : int = AutoConfig.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
# Check that tokenizer_type ≠ model_type
A_ : int = AutoTokenizer.from_pretrained(lowercase , config=lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def _a (self ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase , """vocab.txt""" ) )
A_ : Optional[Any] = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""bert""" , use_fast=lowercase )
self.assertIsInstance(lowercase , lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase , """merges.txt""" ) )
A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""gpt2""" , use_fast=lowercase )
self.assertIsInstance(lowercase , lowercase )
@require_tokenizers
def _a (self ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase , """vocab.txt""" ) )
A_ : Any = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""bert""" )
self.assertIsInstance(lowercase , lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase , """merges.txt""" ) )
A_ : int = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""gpt2""" )
self.assertIsInstance(lowercase , lowercase )
def _a (self ):
with pytest.raises(lowercase ):
AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" )
@require_tokenizers
def _a (self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
A_ : str = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
if isinstance(lowercase , lowercase ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , lowercase )
else:
self.assertEqual(tokenizer.do_lower_case , lowercase )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def _a (self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
lowercase , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ):
A_ : int = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" )
def _a (self ):
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
A_ : List[str] = TOKENIZER_MAPPING.values()
A_ : Optional[Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(lowercase )
@require_tokenizers
def _a (self ):
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=lowercase ) , lowercase )
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , lowercase )
@require_tokenizers
def _a (self ):
A_ : str = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=lowercase )
A_ : List[Any] = """Hello, world. How are you?"""
A_ : List[Any] = tokenizer.tokenize(lowercase )
self.assertEqual("""[UNK]""" , tokens[0] )
A_ : Dict = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=lowercase )
A_ : List[Any] = tokenizer.tokenize(lowercase )
self.assertEqual("""[UNK]""" , tokens[0] )
@require_tokenizers
def _a (self ):
A_ : Optional[int] = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" )
self.assertEqual(type(lowercase ) , lowercase )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 30000 )
self.assertEqual(tokenizer.unk_token , """[UNK]""" )
self.assertEqual(tokenizer.padding_side , """right""" )
self.assertEqual(tokenizer.truncation_side , """right""" )
def _a (self ):
A_ : Any = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : Tuple = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def _a (self ):
A_ : Union[str, Any] = AutoTokenizer.from_pretrained("""ctrl""" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(lowercase , lowercase )
def _a (self ):
# Check we can load the tokenizer config of an online model.
config = get_tokenizer_config("""bert-base-cased""" )
_ = config.pop("""_commit_hash""" , None )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(config , {"""do_lower_case""": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
config = get_tokenizer_config(lowercase )
self.assertDictEqual(config , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""" )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(tmp_dir )
config = get_tokenizer_config(tmp_dir )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" )
def _a (self ):
try:
AutoConfig.register("""custom""" , lowercase )
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase ):
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
A_ : Tuple = CustomTokenizer.from_pretrained(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : List[str] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def _a (self ):
try:
AutoConfig.register("""custom""" , lowercase )
# Can register in two steps
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
lowercase , slow_tokenizer_class=lowercase , fast_tokenizer_class=lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase ):
AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
# We pass through a BERT fast tokenizer because there is no slow-to-fast converter for our new tokenizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
A_ : str = BertTokenizerFast.from_pretrained(lowercase )
bert_tokenizer.save_pretrained(lowercase )
A_ : Optional[Any] = CustomTokenizerFast.from_pretrained(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : List[str] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase , use_fast=lowercase )
self.assertIsInstance(lowercase , lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _a (self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(ValueError ):
tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(ValueError ):
tokenizer = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=False )
tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=True )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(tmp_dir )
reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir , trust_remote_code=True )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
tokenizer = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=True , use_fast=False )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(tmp_dir )
reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir , trust_remote_code=True , use_fast=False )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
@require_tokenizers
def _a (self ):
class NewTokenizer ( BertTokenizer ):
special_attribute_present = False
class NewTokenizerFast ( BertTokenizerFast ):
slow_tokenizer_class = NewTokenizer
special_attribute_present = False
try:
AutoConfig.register("""custom""" , CustomConfig )
AutoTokenizer.register(CustomConfig , slow_tokenizer_class=NewTokenizer )
AutoTokenizer.register(CustomConfig , fast_tokenizer_class=NewTokenizerFast )
# If remote code is not set, the default is to use local
tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=False )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
tokenizer = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=False )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
tokenizer = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=False , use_fast=False )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
tokenizer = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=True )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertTrue(tokenizer.special_attribute_present )
tokenizer = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=True , use_fast=False )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _a (self ):
tokenizer = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=True )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
tokenizer = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=True , use_fast=False )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def _a (self ):
with self.assertRaisesRegex(
lowercase , """bert-base is not a local folder and is not a valid model identifier""" ):
A_ : List[str] = AutoTokenizer.from_pretrained("""bert-base""" )
def _a (self ):
with self.assertRaisesRegex(
lowercase , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
A_ : Tuple = AutoTokenizer.from_pretrained(lowercase , revision="""aaaaaa""" )
def _a (self ):
# Make sure we have cached the tokenizer.
A_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
A_ : Tuple = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 ) | 667 | 0 |
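The registration round trips exercised above reduce to a small pattern; a minimal sketch (the directory paths are illustrative, and CustomConfig/CustomTokenizer stand in for any config/tokenizer pair):

# Minimal register-and-reload round trip (paths are illustrative):
AutoConfig.register("custom", CustomConfig)
AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
tokenizer = CustomTokenizer.from_pretrained("path/to/vocab")  # hypothetical local files
tokenizer.save_pretrained("tmp_dir")
reloaded = AutoTokenizer.from_pretrained("tmp_dir")  # resolves back to CustomTokenizer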
from __future__ import annotations
def fractional_knapsack( value : list[int] , weight : list[int] , capacity : int ) -> tuple[float, list[float]]:
index = list(range(len(value ) ) )
ratio = [v / w for v, w in zip(value , weight )]
index.sort(key=lambda i : ratio[i] , reverse=True )
max_value : float = 0
fractions : list[float] = [0] * len(value )
for i in index:
if weight[i] <= capacity:
fractions[i] = 1
max_value += value[i]
capacity -= weight[i]
else:
fractions[i] = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
| 204 |
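A quick usage sketch of the greedy routine above (the item values are illustrative):

# Three items, capacity 50: greedy by value/weight ratio.
value = [60, 100, 120]
weight = [10, 20, 30]
best, fractions = fractional_knapsack(value, weight, capacity=50)
print(best)       # 240.0 -> items 0 and 1 taken fully, 2/3 of item 2
print(fractions)  # [1, 1, 0.6666666666666666]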
'''simple docstring'''
from __future__ import annotations
def allocation_num( number_of_bytes , partitions ):
'''simple docstring'''
if partitions <= 0:
raise ValueError("""partitions must be a positive number!""" )
if partitions > number_of_bytes:
raise ValueError("""partitions can not > number_of_bytes!""" )
bytes_per_partition = number_of_bytes // partitions
allocation_list = []
for i in range(partitions ):
start_bytes = i * bytes_per_partition + 1
end_bytes = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'{start_bytes}-{end_bytes}' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod() | 667 | 0 |
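A short usage example for the partitioning helper above:

# Split 100 bytes across 4 partitions; the last partition absorbs any remainder.
print(allocation_num(100, 4))  # ['1-25', '26-50', '51-75', '76-100']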
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_xlm_roberta_xl''': [
'''XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaXLConfig''',
'''XLMRobertaXLOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_xlm_roberta_xl'''] = [
'''XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaXLForCausalLM''',
'''XLMRobertaXLForMaskedLM''',
'''XLMRobertaXLForMultipleChoice''',
'''XLMRobertaXLForQuestionAnswering''',
'''XLMRobertaXLForSequenceClassification''',
'''XLMRobertaXLForTokenClassification''',
'''XLMRobertaXLModel''',
'''XLMRobertaXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) | 498 |
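For context, a brief sketch of how this lazy-import pattern behaves at runtime (assuming standard transformers `_LazyModule` semantics and that this file lives at `transformers/models/xlm_roberta_xl/__init__.py`):

# Importing the package is cheap: only the _import_structure dict is built.
import transformers.models.xlm_roberta_xl as xlm_xl
# The first attribute access triggers the real submodule import.
config_cls = xlm_xl.XLMRobertaXLConfig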
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase :Any = logging.get_logger(__name__)
def create_rename_keys( encoder_config , decoder_config ):
'''simple docstring'''
A_ : Dict = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm1.weight', f'encoder.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm1.bias', f'encoder.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.weight', f'encoder.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.bias', f'encoder.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm2.weight', f'encoder.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm2.bias', f'encoder.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.weight', f'encoder.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.bias', f'encoder.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc2.weight', f'encoder.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.mlp.fc2.bias', f'encoder.encoder.layer.{i}.output.dense.bias') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def read_in_q_k_v( state_dict , encoder_config ):
'''simple docstring'''
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
in_proj_weight = state_dict.pop(f'encoder.deit.blocks.{i}.attn.qkv.weight' )
state_dict[f'encoder.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
: encoder_config.hidden_size, :
]
state_dict[f'encoder.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
state_dict[f'encoder.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
-encoder_config.hidden_size :, :
]
def rename_key( dct , old , new ):
'''simple docstring'''
val = dct.pop(old )
dct[new] = val
def prepare_img( checkpoint_url ):
'''simple docstring'''
if "handwritten" in checkpoint_url:
url = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg""" # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
url = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"""
im = Image.open(requests.get(url , stream=True ).raw ).convert("""RGB""" )
return im
@torch.no_grad()
def convert_tr_ocr_checkpoint( checkpoint_url , pytorch_dump_folder_path ):
'''simple docstring'''
encoder_config = ViTConfig(image_size=3_84 , qkv_bias=False )
decoder_config = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
A_ : List[str] = 7_68
elif "large" in checkpoint_url:
# use ViT-large encoder
A_ : Union[str, Any] = 10_24
A_ : List[Any] = 40_96
A_ : Dict = 24
A_ : List[str] = 16
A_ : Union[str, Any] = 10_24
else:
raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" )
# the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
decoder_config.tie_word_embeddings = False
decoder_config.activation_function = """relu"""
decoder_config.max_position_embeddings = 10_24
decoder_config.scale_embedding = True
decoder_config.use_learned_position_embeddings = False
decoder_config.layernorm_embedding = False
# load HuggingFace model
encoder = ViTModel(encoder_config , add_pooling_layer=False )
decoder = TrOCRForCausalLM(decoder_config )
model = VisionEncoderDecoderModel(encoder=encoder , decoder=decoder )
model.eval()
# load state_dict of original model, rename some keys
state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" , check_hash=False )["""model"""]
rename_keys = create_rename_keys(encoder_config , decoder_config )
for src, dest in rename_keys:
rename_key(state_dict , src , dest )
read_in_q_k_v(state_dict , encoder_config )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
val = state_dict.pop(key )
if key.startswith("""decoder""" ) and "output_projection" not in key:
state_dict["""decoder.model.""" + key] = val
else:
state_dict[key] = val
# load state dict
model.load_state_dict(lowerCamelCase__ )
# Check outputs on an image
image_processor = ViTImageProcessor(size=encoder_config.image_size )
tokenizer = RobertaTokenizer.from_pretrained("""roberta-large""" )
processor = TrOCRProcessor(image_processor , tokenizer )
pixel_values = processor(images=prepare_img(checkpoint_url ) , return_tensors="""pt""" ).pixel_values
# verify logits
decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
outputs = model(pixel_values=pixel_values , decoder_input_ids=decoder_input_ids )
logits = outputs.logits
expected_shape = torch.Size([1, 1, 5_02_65] )
if "trocr-base-handwritten" in checkpoint_url:
A_ : Optional[int] = torch.tensor(
[-1.4_502, -4.6_683, -0.5_347, -2.9_291, 9.1_435, -3.0_571, 8.9_764, 1.7_560, 8.7_358, -1.5_311] )
elif "trocr-large-handwritten" in checkpoint_url:
A_ : Any = torch.tensor(
[-2.6_437, -1.3_129, -2.2_596, -5.3_455, 6.3_539, 1.7_604, 5.4_991, 1.4_702, 5.6_113, 2.0_170] )
elif "trocr-base-printed" in checkpoint_url:
A_ : List[Any] = torch.tensor(
[-5.6_816, -5.8_388, 1.1_398, -6.9_034, 6.8_505, -2.4_393, 1.2_284, -1.0_232, -1.9_661, -3.9_210] )
elif "trocr-large-printed" in checkpoint_url:
A_ : Optional[Any] = torch.tensor(
[-6.0_162, -7.0_959, 4.4_155, -5.1_063, 7.0_468, -3.1_631, 2.6_466, -0.3_081, -0.8_106, -1.7_535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , expected_slice , atol=1E-3 ), "First elements of logits not as expected"
Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(pytorch_dump_folder_path )
print(f'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowerCamelCase :Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
lowerCamelCase :Optional[int] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 667 | 0 |
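Once converted, the dump folder is a regular VisionEncoderDecoder checkpoint; a hedged reload sketch (the folder path is hypothetical, whatever was passed to the script above):

from transformers import TrOCRProcessor, VisionEncoderDecoderModel
model = VisionEncoderDecoderModel.from_pretrained("path/to/dump")  # hypothetical path
processor = TrOCRProcessor.from_pretrained("path/to/dump")
pixel_values = processor(images=prepare_img("handwritten"), return_tensors="pt").pixel_values
generated_ids = model.generate(pixel_values)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])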
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester :
'''simple docstring'''
def __init__( self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, ):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.num_channels = num_channels
self.embeddings_size = embeddings_size
self.hidden_sizes = hidden_sizes
self.depths = depths
self.is_training = is_training
self.use_labels = use_labels
self.hidden_act = hidden_act
self.num_labels = num_labels
self.scope = scope
self.num_stages = len(depths )
def prepare_config_and_inputs( self ):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size], self.num_labels )
config = self.get_config()
return config, pixel_values, labels
def get_config( self ):
return ResNetConfig(
num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size, )
def create_and_check_model( self, config, pixel_values, labels ):
model = TFResNetModel(config=config )
result = model(pixel_values )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
def create_and_check_for_image_classification( self, config, pixel_values, labels ):
config.num_labels = self.num_labels
model = TFResNetForImageClassification(config )
result = model(pixel_values, labels=labels )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def prepare_config_and_inputs_for_common( self ):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class TFResNetModelTest ( TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
__UpperCamelCase = (
{'feature-extraction': TFResNetModel, 'image-classification': TFResNetForImageClassification}
if is_tf_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def setUp( self ):
self.model_tester = TFResNetModelTester(self )
self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False )
def __UpperCAmelCase ( self ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCAmelCase ( self ) -> Optional[int]:
return
@unittest.skip(reason="ResNet does not use inputs_embeds" )
def __UpperCAmelCase ( self ) -> List[str]:
pass
@unittest.skip(reason="ResNet does not support input and output embeddings" )
def __UpperCAmelCase ( self ) -> List[Any]:
pass
def __UpperCAmelCase ( self ) -> Optional[int]:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
signature = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], expected_arg_names )
def __UpperCAmelCase ( self ) -> int:
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def __UpperCAmelCase ( self ) -> List[str]:
def check_hidden_states_output(inputs_dict, config, model_class ):
model = model_class(config )
outputs = model(**self._prepare_for_class(inputs_dict, model_class ) )
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_stages = self.model_tester.num_stages
self.assertEqual(len(hidden_states ), expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
layers_type = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
config.layer_type = layer_type
inputs_dict["""output_hidden_states"""] = True
check_hidden_states_output(inputs_dict, config, model_class )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class )
def __UpperCAmelCase ( self ) -> int:
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def __UpperCAmelCase ( self ) -> List[str]:
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TFResNetModel.from_pretrained(model_name )
self.assertIsNotNone(model )
def prepare_img( ):
'''simple docstring'''
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCAmelCase ( self ) -> Tuple:
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __UpperCAmelCase ( self ) -> List[str]:
model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image, return_tensors="tf" )
# forward pass
outputs = model(**inputs )
# verify the logits
expected_shape = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape, expected_shape )
expected_slice = tf.constant([-11.10_69, -9.78_77, -8.37_77] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1E-4 ) )
| 625 |
'''simple docstring'''
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))''')) | 667 | 0 |
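The one-liner above is a quine: `%r` re-inserts the template string with its quotes, while `%%` escapes the literal percent sign so the printed copy contains it again. The same substitution in two steps:

# Step-by-step view of the quine's string substitution:
template = 'print((lambda quine: quine %% quine)(%r))'
print(template % template)  # prints the original one-liner exactly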
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
parent_parser = argparse.ArgumentParser(add_help=False , allow_abbrev=False)
# The main config parser
config_parser = config_command_parser(subparsers)
# The subparser to add commands to
subparsers = config_parser.add_subparsers(title='subcommands' , dest='subcommand')
# Then add other parsers with the parent parser
default_command_parser(subparsers , parents=[parent_parser])
update_command_parser(subparsers , parents=[parent_parser])
return config_parser
def main():
config_parser = get_config_parser()
args = config_parser.parse_args()
if not hasattr(args , 'func'):
config_parser.print_help()
exit(1)
# Run
args.func(args)
if __name__ == "__main__":
main()
| 73 |
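A hedged sketch of driving the parser programmatically instead of through the `accelerate config` entry point (the `default` subcommand name comes from `default_command_parser` above; its flags vary by accelerate version):

parser = get_config_parser()
args = parser.parse_args(["default"])  # assumed subcommand; add flags as needed
if hasattr(args, "func"):
    args.func(args)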
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(R'''digital_image_processing/image_data/lena_small.jpg''')
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative( ):
negative_img = cn.convert_to_negative(img )
# assert negative_img array for at least one True
assert negative_img.any()
def test_change_contrast( ):
with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img:
# Work around assertion for response
assert str(cc.change_contrast(img , 1_10 ) ).startswith(
"""<PIL.Image.Image image mode=RGB size=100x100 at""" )
def test_gen_gaussian_kernel( ):
resp = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def test_canny( ):
canny_img = imread("""digital_image_processing/image_data/lena_small.jpg""" , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
canny_array = canny.canny(canny_img )
# assert canny array for at least one True
assert canny_array.any()
def test_gen_gaussian_kernel_filter( ):
assert gg.gaussian_filter(gray , 5 , sigma=0.9 ).all()
def test_convolve_filter( ):
# laplace diagonals
laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
res = conv.img_convolve(gray , laplace ).astype(uint8 )
assert res.any()
def test_median_filter( ):
assert med.median_filter(gray , 3 ).any()
def test_sobel_filter( ):
grad, theta = sob.sobel_filter(gray )
assert grad.any() and theta.any()
def test_sepia( ):
sepia = sp.make_sepia(img , 20 )
assert sepia.all()
def test_burkes(file_path = "digital_image_processing/image_data/lena_small.jpg" ):
burkes = bs.Burkes(imread(file_path , 1 ) , 1_20 )
burkes.process()
assert burkes.output_img.any()
def test_nearest_neighbour(file_path = "digital_image_processing/image_data/lena_small.jpg" , ):
nn = rs.NearestNeighbour(imread(file_path , 1 ) , 4_00 , 2_00 )
nn.process()
assert nn.output.any()
def test_local_binary_pattern( ):
file_path = """digital_image_processing/image_data/lena.jpg"""
# Reading the image and converting it to grayscale.
image = imread(file_path , 0 )
# Test for get_neighbors_pixel function() return not None
x_coordinate = 0
y_coordinate = 0
center_pixel = image[x_coordinate][y_coordinate]
neighbors_pixels = lbp.get_neighbors_pixel(
image , x_coordinate , y_coordinate , center_pixel )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
lbp_image = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
lbp_image[i][j] = lbp.local_binary_value(image , i , j )
assert lbp_image.any() | 667 | 0 |
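For orientation, the LBP helpers used in the last test can be exercised on a tiny synthetic patch (illustrative only; assumes the helpers accept any 2-D grayscale array and that (1, 1) is an interior pixel):

patch = np.array([[10, 20, 30], [40, 50, 60], [70, 80, 90]], dtype=np.uint8)
center = patch[1][1]
neighbors = lbp.get_neighbors_pixel(patch, 1, 1, center)  # 8-neighbourhood values
print(lbp.local_binary_value(patch, 1, 1))  # encoded LBP value for the center pixel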
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
_UpperCAmelCase = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_UpperCAmelCase = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
def _prepare_for_class( self, inputs_dict, model_class, return_labels=False ):
inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
if return_labels:
if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING ):
inputs_dict["""next_sentence_label"""] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
return inputs_dict
class TFMobileBertModelTester:
def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, embedding_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_12, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
self.embedding_size = embedding_size
def prepare_config_and_inputs( self ):
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length] )
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
choice_labels = ids_tensor([self.batch_size] , self.num_choices )
config = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase_ ( self: str , UpperCamelCase: Optional[Any] , UpperCamelCase: List[Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: List[Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Tuple , UpperCamelCase: List[str] ) -> List[str]:
snake_case__ = TFMobileBertModel(config=UpperCamelCase )
snake_case__ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
snake_case__ = model(UpperCamelCase )
snake_case__ = [input_ids, input_mask]
snake_case__ = model(UpperCamelCase )
snake_case__ = model(UpperCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: Optional[Any] , UpperCamelCase: Optional[int] , UpperCamelCase: List[Any] , UpperCamelCase: Dict , UpperCamelCase: Any , UpperCamelCase: List[str] , UpperCamelCase: Dict ) -> Optional[Any]:
snake_case__ = TFMobileBertForMaskedLM(config=UpperCamelCase )
snake_case__ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
snake_case__ = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self: Dict , UpperCamelCase: int , UpperCamelCase: Union[str, Any] , UpperCamelCase: List[str] , UpperCamelCase: List[Any] , UpperCamelCase: Optional[int] , UpperCamelCase: Union[str, Any] , UpperCamelCase: List[str] ) -> Optional[Any]:
snake_case__ = TFMobileBertForNextSentencePrediction(config=UpperCamelCase )
snake_case__ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
snake_case__ = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: int , UpperCamelCase: Optional[Any] , UpperCamelCase: List[Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: int , UpperCamelCase: str ) -> Any:
snake_case__ = TFMobileBertForPreTraining(config=UpperCamelCase )
snake_case__ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
snake_case__ = model(UpperCamelCase )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: Dict , UpperCamelCase: List[Any] , UpperCamelCase: Dict , UpperCamelCase: Any , UpperCamelCase: Tuple , UpperCamelCase: List[str] , UpperCamelCase: str ) -> List[Any]:
snake_case__ = self.num_labels
snake_case__ = TFMobileBertForSequenceClassification(config=UpperCamelCase )
snake_case__ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
snake_case__ = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: str , UpperCamelCase: Optional[int] , UpperCamelCase: List[Any] , UpperCamelCase: Optional[int] , UpperCamelCase: Optional[int] , UpperCamelCase: Tuple , UpperCamelCase: Any ) -> int:
snake_case__ = self.num_choices
snake_case__ = TFMobileBertForMultipleChoice(config=UpperCamelCase )
snake_case__ = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
snake_case__ = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
snake_case__ = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
snake_case__ = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
snake_case__ = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: Optional[Any] , UpperCamelCase: Dict , UpperCamelCase: Optional[Any] , UpperCamelCase: List[str] , UpperCamelCase: str , UpperCamelCase: Tuple , UpperCamelCase: List[str] ) -> List[str]:
snake_case__ = self.num_labels
snake_case__ = TFMobileBertForTokenClassification(config=UpperCamelCase )
snake_case__ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
snake_case__ = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase_ ( self: Optional[int] , UpperCamelCase: Dict , UpperCamelCase: int , UpperCamelCase: str , UpperCamelCase: Tuple , UpperCamelCase: str , UpperCamelCase: Dict , UpperCamelCase: int ) -> str:
snake_case__ = TFMobileBertForQuestionAnswering(config=UpperCamelCase )
snake_case__ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
snake_case__ = model(UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def prepare_config_and_inputs_for_common( self ):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
def setUp( self ):
self.model_tester = TFMobileBertModelTester(self )
self.config_tester = ConfigTester(self , config_class=MobileBertConfig , hidden_size=37 )
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Dict:
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self: int ) -> Any:
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*UpperCamelCase )
def lowerCAmelCase_ ( self: int ) -> Optional[int]:
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*UpperCamelCase )
def lowerCAmelCase_ ( self: str ) -> Any:
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*UpperCamelCase )
def lowerCAmelCase_ ( self: int ) -> Union[str, Any]:
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*UpperCamelCase )
def lowerCAmelCase_ ( self: Any ) -> int:
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*UpperCamelCase )
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Union[str, Any]:
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*UpperCamelCase )
def lowerCAmelCase_ ( self: List[str] ) -> int:
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*UpperCamelCase )
def lowerCAmelCase_ ( self: Any ) -> Optional[int]:
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*UpperCamelCase )
@slow
def lowerCAmelCase_ ( self: Any ) -> int:
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
snake_case__ = TFMobileBertModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
@require_tf
class __SCREAMING_SNAKE_CASE( unittest.TestCase ):
@slow
def lowerCAmelCase_ ( self: Any ) -> List[Any]:
snake_case__ = TFMobileBertForPreTraining.from_pretrained('google/mobilebert-uncased' )
snake_case__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
snake_case__ = model(UpperCamelCase )[0]
snake_case__ = [1, 6, 3_05_22]
self.assertEqual(output.shape , UpperCamelCase )
snake_case__ = tf.constant(
[
[
[-4.5_919_547, -9.248_295, -9.645_256],
[-6.7_306_175, -6.440_284, -6.6_052_837],
[-7.2_743_506, -6.7_847_915, -6.024_673],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCamelCase , atol=1e-4 )
| 328 |
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
lowerCamelCase :Dict = get_logger(__name__)
class _PatchedModuleObj :
def __init__(self , module , attrs=None ):
attrs = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith("""__""" ):
setattr(self , key , getattr(module , key ) )
self._original_module = module._original_module if isinstance(module , _PatchedModuleObj ) else module
class patch_submodule :
_active_patches = []
def __init__(self , obj , target , new , attrs=None ):
self.obj = obj
self.target = target
self.new = new
self.key = target.split(""".""" )[0]
self.original = {}
self.attrs = attrs or []
def __enter__(self ):
*submodules, target_attr = self.target.split(""".""" )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(submodules ) ):
try:
submodule = import_module(""".""".join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
obj_attr = getattr(self.obj , attr )
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(obj_attr , _PatchedModuleObj ) and obj_attr._original_module is submodule)
):
self.original[attr] = obj_attr
# patch at top level
setattr(self.obj , attr , _PatchedModuleObj(obj_attr , attrs=self.attrs ) )
patched = getattr(self.obj , attr )
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(patched , key , _PatchedModuleObj(getattr(patched , key , None ) , attrs=self.attrs ) )
patched = getattr(patched , key )
# finally set the target attribute
setattr(patched , target_attr , self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
attr_value = getattr(import_module(""".""".join(submodules ) ) , target_attr )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , attr ) is attr_value:
self.original[attr] = getattr(self.obj , attr )
setattr(self.obj , attr , self.new )
elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
self.original[target_attr] = globals()["""__builtins__"""][target_attr]
setattr(self.obj , target_attr , self.new )
else:
raise RuntimeError(F'Tried to patch attribute {target_attr} instead of a submodule.' )
def __exit__(self , *exc_info ):
for attr in list(self.original ):
setattr(self.obj , attr , self.original.pop(attr ) )
def start(self ):
self.__enter__()
self._active_patches.append(self )
def stop(self ):
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__() | 667 | 0 |
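A usage sketch for the context manager above (`mymodule` and the function it exposes are hypothetical):

import mymodule  # hypothetical module that does `import os` and calls os.path.join

def fake_join(*paths):
    return "/".join(paths)

# Inside the block, os.path.join as seen from mymodule's globals is fake_join:
with patch_submodule(mymodule, "os.path.join", fake_join):
    mymodule.build_path("a", "b")  # hypothetical caller of os.path.join
# On exit, the original bindings are restored.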
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase_ ( __UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : Tuple = LongformerTokenizer
UpperCAmelCase : List[Any] = True
UpperCAmelCase : str = LongformerTokenizerFast
UpperCAmelCase : int = True
def lowerCAmelCase_ ( self : List[Any] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_A = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
_A = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
_A = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_A = {"""unk_token""": """<unk>"""}
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_UpperCAmelCase ) )
def lowerCAmelCase_ ( self : Optional[int] , **_UpperCAmelCase : Union[str, Any] ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] , **_UpperCAmelCase : Optional[Any] ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Optional[int] ):
_A = """lower newer"""
_A = """lower newer"""
return input_text, output_text
def lowerCAmelCase_ ( self : List[Any] ):
tokenizer = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
text = """lower newer"""
bpe_tokens = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
tokens = tokenizer.tokenize(text ) # , add_prefix_space=True)
self.assertListEqual(tokens , bpe_tokens )
input_tokens = tokens + [tokenizer.unk_token]
input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
def lowerCAmelCase_ ( self : Any ):
tokenizer = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=_UpperCAmelCase ) , [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=_UpperCAmelCase ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )
@slow
def lowerCAmelCase_ ( self : Tuple ):
tokenizer = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' )
text = tokenizer.encode('sequence builders' , add_special_tokens=False )
text_2 = tokenizer.encode('multi-sequence build' , add_special_tokens=False )
encoded_text_from_decode = tokenizer.encode(
'sequence builders' , add_special_tokens=True , add_prefix_space=False )
encoded_pair_from_decode = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=True , add_prefix_space=False )
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_2 )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowerCAmelCase_ ( self : Dict ):
tokenizer = self.get_tokenizer()
sequence = """Encode this sequence."""
space_encoding = tokenizer.byte_encoder[""" """.encode('utf-8' )[0]]
# Testing encoder arguments
encoded = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=False )
first_char = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(first_char , space_encoding )
encoded = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=True )
first_char = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(first_char , space_encoding )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
encoded = tokenizer.encode(sequence , add_special_tokens=True )
first_char = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(first_char , space_encoding )
# Testing spaces after special tokens
_A = """<mask>"""
tokenizer.add_special_tokens(
{'mask_token': AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase )} ) # mask token has a left space
_A = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
_A = """Encode <mask> sequence"""
_A = """Encode <mask>sequence"""
_A = tokenizer.encode(_UpperCAmelCase )
_A = encoded.index(_UpperCAmelCase )
_A = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
_A = tokenizer.encode(_UpperCAmelCase )
_A = encoded.index(_UpperCAmelCase )
_A = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(_UpperCAmelCase , _UpperCAmelCase )
def lowerCAmelCase_ ( self : Optional[Any] ):
pass
def lowerCAmelCase_ ( self : Any ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
sentence = """A, <mask> AllenNLP sentence."""
tokens_r = tokenizer_r.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
tokens_p = tokenizer_p.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while python doesn't
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
tokens_p_str , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
tokens_r_str , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def lowerCAmelCase_ ( self : int ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=True , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets )
pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'] , add_prefix_space )
self.assertEqual(post_processor_state['add_prefix_space'] , add_prefix_space )
self.assertEqual(post_processor_state['trim_offsets'] , trim_offsets )
def lowerCAmelCase_ ( self : Optional[int] ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_A = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
_A = F'''{text_of_1_token} {text_of_1_token}'''
_A = self.rust_tokenizer_class.from_pretrained(
_UpperCAmelCase , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase )
_A = tokenizer_r(_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCAmelCase ) + 1, len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) , )
_A = self.rust_tokenizer_class.from_pretrained(
_UpperCAmelCase , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase )
_A = tokenizer_r(_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCAmelCase ) + 1, len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) , )
_A = self.rust_tokenizer_class.from_pretrained(
_UpperCAmelCase , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase )
_A = tokenizer_r(_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCAmelCase ), len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) , )
_A = self.rust_tokenizer_class.from_pretrained(
_UpperCAmelCase , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase )
_A = tokenizer_r(_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCAmelCase ), len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) , )
_A = F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
_A = self.rust_tokenizer_class.from_pretrained(
_UpperCAmelCase , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase )
_A = tokenizer_r(_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_UpperCAmelCase ) + 1, 1 + len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) , )
_A = self.rust_tokenizer_class.from_pretrained(
_UpperCAmelCase , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase )
_A = tokenizer_r(_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_UpperCAmelCase ), 1 + len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) , )
_A = self.rust_tokenizer_class.from_pretrained(
_UpperCAmelCase , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase )
_A = tokenizer_r(_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_UpperCAmelCase ), 1 + len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) , )
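    # Illustrative sketch (not part of the test class above): the behaviour these
    # offset tests pin down, shown on a real byte-level BPE fast tokenizer. The
    # checkpoint id is an assumption; any RoBERTa-style fast tokenizer behaves the
    # same way. `add_prefix_space` controls whether the first word is tokenized as if
    # preceded by a space, and `trim_offsets` controls whether that space is kept
    # inside each token's (start, end) character span.
    #
    #   from transformers import RobertaTokenizerFast
    #   tok = RobertaTokenizerFast.from_pretrained(
    #       "roberta-base", add_prefix_space=True, trim_offsets=False
    #   )
    #   enc = tok(" hello hello", return_offsets_mapping=True, add_special_tokens=False)
    #   print(enc.offset_mapping)  # spans include the leading spaces when trim_offsets=False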
| 7 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCamelCase :int = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Optional[int] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :List[Any] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Any = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
lowerCamelCase :Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 667 | 0 |
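# Illustrative sketch (not part of the rows above or below): the mechanism behind the
# _LazyModule pattern used in the wav2vec2 __init__ sample. Attribute access triggers
# the real submodule import via __getattr__, so importing the package stays cheap
# until a class is actually used. Names and structure are simplified assumptions, not
# the exact transformers implementation.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: module
            for module, symbols in import_structure.items()
            for symbol in symbols
        }
        self.__all__ = list(self._symbol_to_module)

    def __getattr__(self, symbol):
        mapping = self.__dict__.get("_symbol_to_module", {})
        if symbol not in mapping:
            raise AttributeError(f"module {self.__name__!r} has no attribute {symbol!r}")
        submodule = importlib.import_module("." + mapping[symbol], self.__name__)
        value = getattr(submodule, symbol)
        setattr(self, symbol, value)  # cache so the next lookup bypasses __getattr__
        return value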
"""simple docstring"""
import math
def UpperCAmelCase ( snake_case : Union[str, Any] ):
_lowerCAmelCase:Union[str, Any] = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(lowerCamelCase__ )
def UpperCAmelCase ( snake_case : Union[str, Any] = 1 / 12345 ):
_lowerCAmelCase:int = 0
_lowerCAmelCase:List[str] = 0
_lowerCAmelCase:str = 3
while True:
_lowerCAmelCase:Tuple = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(lowerCamelCase__ ):
_lowerCAmelCase:Any = int(lowerCamelCase__ )
total_partitions += 1
if check_partition_perfect(lowerCamelCase__ ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(lowerCamelCase__ )
integer += 1
if __name__ == "__main__":
print(F"{solution() = }")
| 227 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , lowercase , lowercase , lowercase , lowercase = None , ):
super().__init__()
self.register_modules(transformer=lowercase , vae=lowercase , scheduler=lowercase )
# create a imagenet -> id dictionary for easier use
A_ : str = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(""",""" ):
A_ : Optional[Any] = int(lowercase )
A_ : List[Any] = dict(sorted(self.labels.items() ) )
def _a (self , lowercase ):
if not isinstance(lowercase , lowercase ):
A_ : Optional[int] = list(lowercase )
for l in label:
if l not in self.labels:
raise ValueError(
F'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.' )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__(self , lowercase , lowercase = 4.0 , lowercase = None , lowercase = 50 , lowercase = "pil" , lowercase = True , ):
A_ : Tuple = len(lowercase )
A_ : Optional[Any] = self.transformer.config.sample_size
A_ : int = self.transformer.config.in_channels
A_ : Optional[int] = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowercase , device=self.device , dtype=self.transformer.dtype , )
A_ : Optional[Any] = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
A_ : Optional[int] = torch.tensor(lowercase , device=self.device ).reshape(-1 )
A_ : Optional[int] = torch.tensor([1000] * batch_size , device=self.device )
A_ : Optional[Any] = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(lowercase )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
A_ : List[Any] = latent_model_input[: len(lowercase ) // 2]
A_ : List[str] = torch.cat([half, half] , dim=0 )
A_ : Any = self.scheduler.scale_model_input(lowercase , lowercase )
A_ : Tuple = t
if not torch.is_tensor(lowercase ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
A_ : Optional[Any] = latent_model_input.device.type == """mps"""
if isinstance(lowercase , lowercase ):
A_ : Optional[Any] = torch.floataa if is_mps else torch.floataa
else:
A_ : List[Any] = torch.intaa if is_mps else torch.intaa
A_ : List[Any] = torch.tensor([timesteps] , dtype=lowercase , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
A_ : List[Any] = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
A_ : int = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
A_ : List[Any] = self.transformer(
lowercase , timestep=lowercase , class_labels=lowercase ).sample
# perform guidance
if guidance_scale > 1:
A_, A_ : Any = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
A_, A_ : List[Any] = torch.split(lowercase , len(lowercase ) // 2 , dim=0 )
A_ : Optional[Any] = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
A_ : str = torch.cat([half_eps, half_eps] , dim=0 )
A_ : Optional[int] = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
A_, A_ : int = torch.split(lowercase , lowercase , dim=1 )
else:
A_ : Optional[int] = noise_pred
# compute previous image: x_t -> x_t-1
A_ : Union[str, Any] = self.scheduler.step(lowercase , lowercase , lowercase ).prev_sample
if guidance_scale > 1:
A_, A_ : int = latent_model_input.chunk(2 , dim=0 )
else:
A_ : Union[str, Any] = latent_model_input
A_ : Union[str, Any] = 1 / self.vae.config.scaling_factor * latents
A_ : List[Any] = self.vae.decode(lowercase ).sample
A_ : List[str] = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A_ : Union[str, Any] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
A_ : int = self.numpy_to_pil(lowercase )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=lowercase ) | 667 | 0 |
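# Illustrative usage sketch for the class-conditional transformer pipeline above
# (it mirrors the DiT pipeline; the checkpoint id and label strings are assumptions,
# and running this needs network access and a GPU):
#
#   import torch
#   from diffusers import DiTPipeline
#
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
#   pipe = pipe.to("cuda")
#   class_ids = pipe.get_label_ids(["white shark", "golden retriever"])
#   images = pipe(class_labels=class_ids, guidance_scale=4.0, num_inference_steps=25).images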
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
a : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class a ( __UpperCAmelCase ):
def __init__( self : Union[str, Any] , lowercase_ : int , lowercase_ : int , lowercase_ : Dict , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : int , lowercase_ : Dict , lowercase_ : Optional[Any] , ):
super().__init__()
if safety_checker is None:
logger.warning(
F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
speech_model=lowercase_ , speech_processor=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , unet=lowercase_ , scheduler=lowercase_ , feature_extractor=lowercase_ , )
def A_ ( self : Any , lowercase_ : List[str] = "auto" ):
if slice_size == "auto":
snake_case_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase_ )
def A_ ( self : Any ):
self.enable_attention_slicing(lowercase_ )
@torch.no_grad()
def __call__( self : Dict , lowercase_ : Any , lowercase_ : Union[str, Any]=1_6000 , lowercase_ : Any = 512 , lowercase_ : Tuple = 512 , lowercase_ : Any = 50 , lowercase_ : Tuple = 7.5 , lowercase_ : Union[str, Any] = None , lowercase_ : Any = 1 , lowercase_ : Optional[int] = 0.0 , lowercase_ : Dict = None , lowercase_ : int = None , lowercase_ : int = "pil" , lowercase_ : Dict = True , lowercase_ : List[Any] = None , lowercase_ : Tuple = 1 , **lowercase_ : int , ):
snake_case_ = self.speech_processor.feature_extractor(
lowercase_ , return_tensors='''pt''' , sampling_rate=lowercase_ ).input_features.to(self.device )
snake_case_ = self.speech_model.generate(lowercase_ , max_length=48_0000 )
snake_case_ = self.speech_processor.tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ , normalize=lowercase_ )[
0
]
if isinstance(lowercase_ , lowercase_ ):
snake_case_ = 1
elif isinstance(lowercase_ , lowercase_ ):
snake_case_ = len(lowercase_ )
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(lowercase_ )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowercase_ , lowercase_ ) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(lowercase_ )}." )
# get prompt text embeddings
snake_case_ = self.tokenizer(
lowercase_ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
snake_case_ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
snake_case_ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F" {self.tokenizer.model_max_length} tokens: {removed_text}" )
snake_case_ = text_input_ids[:, : self.tokenizer.model_max_length]
snake_case_ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
snake_case_ = text_embeddings.shape
snake_case_ = text_embeddings.repeat(1 , lowercase_ , 1 )
snake_case_ = text_embeddings.view(bs_embed * num_images_per_prompt , lowercase_ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
snake_case_ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
snake_case_ = 42
if negative_prompt is None:
snake_case_ = [""""""] * batch_size
elif type(lowercase_ ) is not type(lowercase_ ):
raise TypeError(
F"`negative_prompt` should be the same type to `prompt`, but got {type(lowercase_ )} !="
F" {type(lowercase_ )}." )
elif isinstance(lowercase_ , lowercase_ ):
snake_case_ = [negative_prompt]
elif batch_size != len(lowercase_ ):
raise ValueError(
F"`negative_prompt`: {negative_prompt} has batch size {len(lowercase_ )}, but `prompt`:"
F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
''' the batch size of `prompt`.''' )
else:
snake_case_ = negative_prompt
snake_case_ = text_input_ids.shape[-1]
snake_case_ = self.tokenizer(
lowercase_ , padding='''max_length''' , max_length=lowercase_ , truncation=lowercase_ , return_tensors='''pt''' , )
snake_case_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
snake_case_ = uncond_embeddings.shape[1]
snake_case_ = uncond_embeddings.repeat(1 , lowercase_ , 1 )
snake_case_ = uncond_embeddings.view(batch_size * num_images_per_prompt , lowercase_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
snake_case_ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
snake_case_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
snake_case_ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
snake_case_ = torch.randn(lowercase_ , generator=lowercase_ , device='''cpu''' , dtype=lowercase_ ).to(
self.device )
else:
snake_case_ = torch.randn(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_ )
else:
if latents.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
snake_case_ = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(lowercase_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
snake_case_ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
snake_case_ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
snake_case_ = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
snake_case_ = {}
if accepts_eta:
snake_case_ = eta
for i, t in enumerate(self.progress_bar(lowercase_ ) ):
# expand the latents if we are doing classifier free guidance
snake_case_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
snake_case_ = self.scheduler.scale_model_input(lowercase_ , lowercase_ )
# predict the noise residual
snake_case_ = self.unet(lowercase_ , lowercase_ , encoder_hidden_states=lowercase_ ).sample
# perform guidance
if do_classifier_free_guidance:
snake_case_ = noise_pred.chunk(2 )
snake_case_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
snake_case_ = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowercase_ , lowercase_ , lowercase_ )
snake_case_ = 1 / 0.1_8215 * latents
snake_case_ = self.vae.decode(lowercase_ ).sample
snake_case_ = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
snake_case_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
snake_case_ = self.numpy_to_pil(lowercase_ )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=lowercase_ , nsfw_content_detected=lowercase_ )
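# Illustrative usage sketch (assumptions: this mirrors the community speech-to-image
# pipeline; repo ids are placeholders, and running it needs network access and a
# GPU). Raw mono audio plus its sampling rate go in, Whisper produces a transcript,
# and the transcript drives the standard Stable Diffusion loop above:
#
#   import torch
#   from datasets import load_dataset
#   from diffusers import DiffusionPipeline
#
#   audio = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean",
#                        split="validation")[0]["audio"]
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4",
#       custom_pipeline="speech_to_image_diffusion",  # assumed community pipeline name
#       torch_dtype=torch.float16,
#   ).to("cuda")
#   image = pipe(audio["array"], sampling_rate=audio["sampling_rate"]).images[0]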
| 640 |
'''simple docstring'''
import math
lowerCamelCase :int = 1_0
lowerCamelCase :List[Any] = 7
lowerCamelCase :Union[str, Any] = BALLS_PER_COLOUR * NUM_COLOURS
def a ( lowerCamelCase__ = 20 ):
'''simple docstring'''
A_ : Dict = math.comb(lowerCamelCase__ , lowerCamelCase__ )
A_ : Optional[Any] = math.comb(NUM_BALLS - BALLS_PER_COLOUR , lowerCamelCase__ )
A_ : List[str] = NUM_COLOURS * (1 - missing_colour / total)
return f'{result:.9f}'
if __name__ == "__main__":
print(solution(2_0)) | 667 | 0 |
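# Monte-carlo sanity check of the closed form above (illustrative): by linearity of
# expectation, E[#distinct colours] = C * (1 - C(N-B, n) / C(N, n)), since each
# colour is entirely absent from an n-ball draw with probability C(N-B, n) / C(N, n).
import math
import random

def expected(num_draws: int = 20) -> float:
    return 7 * (1 - math.comb(60, num_draws) / math.comb(70, num_draws))

def simulate(num_draws: int = 20, trials: int = 50_000) -> float:
    balls = [colour for colour in range(7) for _ in range(10)]  # 7 colours x 10 balls
    return sum(len(set(random.sample(balls, num_draws))) for _ in range(trials)) / trials

# expected() is ~6.8187418; simulate() should land within roughly 0.01 of it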
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
__magic_name__ = (EulerDiscreteScheduler,)
__magic_name__ = 1_0
def _UpperCAmelCase ( self , **lowerCamelCase__ ):
UpperCAmelCase__: int = {
"""num_train_timesteps""": 1_1_0_0,
"""beta_start""": 0.0_001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
config.update(**lowerCamelCase__ )
return config
def _UpperCAmelCase ( self ):
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowerCamelCase__ )
def _UpperCAmelCase ( self ):
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=lowerCamelCase__ , beta_end=lowerCamelCase__ )
def _UpperCAmelCase ( self ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowerCamelCase__ )
def _UpperCAmelCase ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase__ )
def _UpperCAmelCase ( self ):
UpperCAmelCase__: List[Any] = self.scheduler_classes[0]
UpperCAmelCase__: str = self.get_scheduler_config()
UpperCAmelCase__: Dict = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
UpperCAmelCase__: Dict = torch.manual_seed(0 )
UpperCAmelCase__: Optional[int] = self.dummy_model()
UpperCAmelCase__: List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCAmelCase__: Union[str, Any] = sample.to(lowerCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase__: Union[str, Any] = scheduler.scale_model_input(lowerCamelCase__ , lowerCamelCase__ )
UpperCAmelCase__: str = model(lowerCamelCase__ , lowerCamelCase__ )
UpperCAmelCase__: List[str] = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , generator=lowerCamelCase__ )
UpperCAmelCase__: Union[str, Any] = output.prev_sample
UpperCAmelCase__: List[Any] = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCAmelCase__: Union[str, Any] = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 10.0_807 ) < 1e-2
assert abs(result_mean.item() - 0.0_131 ) < 1e-3
def _UpperCAmelCase ( self ):
UpperCAmelCase__: Optional[Any] = self.scheduler_classes[0]
UpperCAmelCase__: Optional[int] = self.get_scheduler_config(prediction_type="v_prediction" )
UpperCAmelCase__: Union[str, Any] = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
UpperCAmelCase__: List[str] = torch.manual_seed(0 )
UpperCAmelCase__: Optional[int] = self.dummy_model()
UpperCAmelCase__: Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCAmelCase__: Optional[int] = sample.to(lowerCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase__: Dict = scheduler.scale_model_input(lowerCamelCase__ , lowerCamelCase__ )
UpperCAmelCase__: Dict = model(lowerCamelCase__ , lowerCamelCase__ )
UpperCAmelCase__: List[Any] = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , generator=lowerCamelCase__ )
UpperCAmelCase__: Union[str, Any] = output.prev_sample
UpperCAmelCase__: List[Any] = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCAmelCase__: Tuple = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 0.0_002 ) < 1e-2
assert abs(result_mean.item() - 2.2_676e-06 ) < 1e-3
def _UpperCAmelCase ( self ):
UpperCAmelCase__: List[Any] = self.scheduler_classes[0]
UpperCAmelCase__: str = self.get_scheduler_config()
UpperCAmelCase__: Union[str, Any] = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase__ )
UpperCAmelCase__: List[Any] = torch.manual_seed(0 )
UpperCAmelCase__: List[str] = self.dummy_model()
UpperCAmelCase__: Dict = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
UpperCAmelCase__: Union[str, Any] = sample.to(lowerCamelCase__ )
for t in scheduler.timesteps:
UpperCAmelCase__: List[Any] = scheduler.scale_model_input(lowerCamelCase__ , lowerCamelCase__ )
UpperCAmelCase__: Union[str, Any] = model(lowerCamelCase__ , lowerCamelCase__ )
UpperCAmelCase__: List[str] = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , generator=lowerCamelCase__ )
UpperCAmelCase__: List[str] = output.prev_sample
UpperCAmelCase__: List[str] = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCAmelCase__: int = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 10.0_807 ) < 1e-2
assert abs(result_mean.item() - 0.0_131 ) < 1e-3
def _UpperCAmelCase ( self ):
UpperCAmelCase__: str = self.scheduler_classes[0]
UpperCAmelCase__: Optional[int] = self.get_scheduler_config()
UpperCAmelCase__: Any = scheduler_class(**lowerCamelCase__ , use_karras_sigmas=lowerCamelCase__ )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase__ )
UpperCAmelCase__: List[Any] = torch.manual_seed(0 )
UpperCAmelCase__: Dict = self.dummy_model()
UpperCAmelCase__: Dict = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
UpperCAmelCase__: Optional[Any] = sample.to(lowerCamelCase__ )
for t in scheduler.timesteps:
UpperCAmelCase__: List[str] = scheduler.scale_model_input(lowerCamelCase__ , lowerCamelCase__ )
UpperCAmelCase__: str = model(lowerCamelCase__ , lowerCamelCase__ )
UpperCAmelCase__: Tuple = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , generator=lowerCamelCase__ )
UpperCAmelCase__: Optional[Any] = output.prev_sample
UpperCAmelCase__: List[str] = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCAmelCase__: str = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9 ) < 1e-2
assert abs(result_mean.item() - 0.16_213_932_633_399_963 ) < 1e-3 | 113 |
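# Minimal sketch of the denoising loop the tests above exercise (the zero tensor is a
# stand-in for a real UNet; everything else is the scheduler API as used in the
# tests): scale the sample for the current sigma, predict noise, take one Euler step.
import torch
from diffusers import EulerDiscreteScheduler

scheduler = EulerDiscreteScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # placeholder for unet(model_input, t).sample
    sample = scheduler.step(noise_pred, t, sample).prev_sample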
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase :List[Any] = logging.get_logger(__name__)
lowerCamelCase :Union[str, Any] = {
'''google/pix2struct-textcaps-base''': (
'''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'''
),
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'pix2struct_text_model'
__SCREAMING_SNAKE_CASE : Optional[int] = ['past_key_values']
__SCREAMING_SNAKE_CASE : List[Any] = {
'hidden_size': 'hidden_size',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__(self , lowercase=50244 , lowercase=768 , lowercase=64 , lowercase=2048 , lowercase=12 , lowercase=12 , lowercase=32 , lowercase=128 , lowercase=0.1 , lowercase=1E-6 , lowercase=1.0 , lowercase="gelu_new" , lowercase=0 , lowercase=False , lowercase=0 , lowercase=1 , lowercase=False , lowercase=True , **lowercase , ):
A_ : Tuple = vocab_size
A_ : str = hidden_size
A_ : Optional[Any] = d_kv
A_ : Tuple = d_ff
A_ : str = num_layers
A_ : int = num_heads
A_ : Dict = relative_attention_num_buckets
A_ : Optional[Any] = relative_attention_max_distance
A_ : Dict = dropout_rate
A_ : Optional[int] = layer_norm_epsilon
A_ : Dict = initializer_factor
A_ : Any = use_cache
A_ : int = eos_token_id
A_ : Tuple = decoder_start_token_id
# for backwards compatibility
A_ : str = dense_act_fn
super().__init__(
pad_token_id=lowercase , eos_token_id=lowercase , decoder_start_token_id=lowercase , tie_word_embeddings=lowercase , is_decoder=lowercase , **lowercase , )
@classmethod
def _a (cls , lowercase , **lowercase ):
cls._set_token_in_kwargs(lowercase )
A_, A_ : List[str] = cls.get_config_dict(lowercase , **lowercase )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
A_ : int = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(lowercase , **lowercase )
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : int = 'pix2struct_vision_model'
def __init__(self , lowercase=768 , lowercase=768 , lowercase=2048 , lowercase=64 , lowercase=12 , lowercase=12 , lowercase="gelu_new" , lowercase=1E-6 , lowercase=0.0 , lowercase=0.0 , lowercase=1E-10 , lowercase=1.0 , lowercase=4096 , lowercase=32 , lowercase=128 , **lowercase , ):
super().__init__(**lowercase )
A_ : List[str] = hidden_size
A_ : Optional[int] = patch_embed_hidden_size
A_ : Any = d_ff
A_ : str = dropout_rate
A_ : Dict = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : List[Any] = initializer_range
A_ : List[str] = initializer_factor
A_ : Dict = attention_dropout
A_ : Optional[Any] = layer_norm_eps
A_ : Optional[Any] = dense_act_fn
A_ : List[Any] = seq_len
A_ : Tuple = relative_attention_num_buckets
A_ : Any = relative_attention_max_distance
A_ : int = d_kv
@classmethod
def _a (cls , lowercase , **lowercase ):
cls._set_token_in_kwargs(lowercase )
A_, A_ : List[Any] = cls.get_config_dict(lowercase , **lowercase )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
A_ : Tuple = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(lowercase , **lowercase )
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Any = 'pix2struct'
__SCREAMING_SNAKE_CASE : List[Any] = True
def __init__(self , lowercase=None , lowercase=None , lowercase=1.0 , lowercase=0.02 , lowercase=False , lowercase=False , lowercase=True , **lowercase , ):
super().__init__(tie_word_embeddings=lowercase , is_encoder_decoder=lowercase , **lowercase )
if text_config is None:
A_ : Optional[Any] = {}
logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""" )
if vision_config is None:
A_ : Tuple = {}
logger.info("""vision_config is None. Initializing the Pix2StructVisionConfig with default values.""" )
A_ : Tuple = PixaStructTextConfig(**lowercase )
A_ : List[str] = PixaStructVisionConfig(**lowercase )
A_ : Dict = self.text_config.decoder_start_token_id
A_ : Union[str, Any] = self.text_config.pad_token_id
A_ : str = self.text_config.eos_token_id
A_ : List[str] = initializer_factor
A_ : int = initializer_range
A_ : Tuple = self.initializer_range
A_ : Tuple = self.initializer_range
A_ : List[str] = is_vqa
@classmethod
def _a (cls , lowercase , lowercase , **lowercase ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase )
def _a (self ):
A_ : Optional[Any] = copy.deepcopy(self.__dict__ )
A_ : str = self.text_config.to_dict()
A_ : List[Any] = self.vision_config.to_dict()
A_ : List[str] = self.__class__.model_type
return output | 667 | 0 |
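# Illustrative sketch of composing the composite config from its two sub-configs,
# which is what the final classmethod above does (named from_text_vision_configs
# upstream; the class names below use the real transformers spelling rather than the
# renamed ones in the sample):
#
#   from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig
#
#   text_cfg = Pix2StructTextConfig(num_layers=2, hidden_size=64)
#   vision_cfg = Pix2StructVisionConfig(num_hidden_layers=2, hidden_size=64)
#   cfg = Pix2StructConfig.from_text_vision_configs(text_cfg, vision_cfg)
#   assert cfg.pad_token_id == text_cfg.pad_token_id  # pulled from the text config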
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class _UpperCAmelCase ( __UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ : torch.FloatTensor
SCREAMING_SNAKE_CASE_ : torch.FloatTensor
class _UpperCAmelCase ( __UpperCAmelCase , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ : Tuple = 1
@register_to_config
def __init__( self : Optional[int] , A : Any = 20_00 , A : Tuple = 0.15 , A : str = 0.01 , A : Any = 13_48.0 , A : List[str] = 1e-5 , A : Optional[int] = 1 , ) -> Optional[Any]:
# standard deviation of the initial noise distribution
lowercase_ : Optional[int] = sigma_max
# setable values
lowercase_ : Dict = None
self.set_sigmas(A , A , A , A )
def A ( self : List[Any] , A : Union[str, Any] , A : str = None ) -> Dict:
return sample
def A ( self : Optional[Any] , A : str , A : Tuple = None , A : int = None ) -> Any:
lowercase_ : str = sampling_eps if sampling_eps is not None else self.config.sampling_eps
lowercase_ : List[str] = torch.linspace(1 , A , A , device=A )
def A ( self : List[str] , A : str , A : Optional[Any] = None , A : Optional[int] = None , A : List[str] = None ) -> Optional[int]:
lowercase_ : Tuple = sigma_min if sigma_min is not None else self.config.sigma_min
lowercase_ : Union[str, Any] = sigma_max if sigma_max is not None else self.config.sigma_max
lowercase_ : str = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(A , A )
lowercase_ : Optional[int] = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
lowercase_ : Optional[Any] = torch.exp(torch.linspace(math.log(A ) , math.log(A ) , A ) )
lowercase_ : List[str] = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def A ( self : Optional[Any] , A : List[Any] , A : Optional[Any] ) -> List[str]:
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def A ( self : Dict , A : Any , A : Union[str, Any] , A : List[Any] , A : Dict = None , A : Optional[int] = True , ) -> Dict:
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
lowercase_ : Tuple = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
lowercase_ : List[Any] = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
lowercase_ : List[Any] = timesteps.to(self.discrete_sigmas.device )
lowercase_ : List[Any] = self.discrete_sigmas[timesteps].to(sample.device )
lowercase_ : Dict = self.get_adjacent_sigma(A , A ).to(sample.device )
lowercase_ : str = torch.zeros_like(A )
lowercase_ : Dict = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
lowercase_ : Union[str, Any] = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
lowercase_ : Dict = diffusion.unsqueeze(-1 )
lowercase_ : List[Any] = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
lowercase_ : Dict = randn_tensor(
sample.shape , layout=sample.layout , generator=A , device=sample.device , dtype=sample.dtype )
lowercase_ : str = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
lowercase_ : Tuple = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=A , prev_sample_mean=A )
def A ( self : Any , A : Any , A : Dict , A : Any = None , A : int = True , ) -> Optional[Any]:
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
lowercase_ : Any = randn_tensor(sample.shape , layout=sample.layout , generator=A ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
lowercase_ : Optional[Any] = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
lowercase_ : Optional[int] = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
lowercase_ : int = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
lowercase_ : List[Any] = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
lowercase_ : Any = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
lowercase_ : Optional[Any] = step_size.unsqueeze(-1 )
lowercase_ : Dict = sample + step_size * model_output
lowercase_ : Dict = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=A )
def A ( self : Any , A : str , A : Optional[Any] , A : Union[str, Any] , ) -> Tuple:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
lowercase_ : str = timesteps.to(original_samples.device )
lowercase_ : Union[str, Any] = self.discrete_sigmas.to(original_samples.device )[timesteps]
lowercase_ : str = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(A ) * sigmas[:, None, None, None]
)
lowercase_ : Tuple = noise + original_samples
return noisy_samples
def __len__( self : Dict ) -> Optional[Any]:
return self.config.num_train_timesteps
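# Sketch of the predictor-corrector sampling loop this scheduler implements (the
# renamed `A` methods above correspond to step_correct / step_pred upstream; the
# random tensor stands in for a score model - a zero score would zero out grad_norm
# in the SNR-based corrector step size):
import torch
from diffusers import ScoreSdeVeScheduler

scheduler = ScoreSdeVeScheduler()
scheduler.set_timesteps(num_inference_steps=5)
scheduler.set_sigmas(num_inference_steps=5)
sample = torch.randn(1, 3, 8, 8) * scheduler.config.sigma_max
for t in scheduler.timesteps:
    score = torch.randn_like(sample)  # placeholder for model(sample, t).sample
    sample = scheduler.step_correct(score, sample).prev_sample  # Langevin correction
    sample = scheduler.step_pred(score, t, sample).prev_sample  # reverse-SDE predictor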
| 231 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
lowerCamelCase :Union[str, Any] = {
'''configuration_audio_spectrogram_transformer''': [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ASTConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :int = [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ASTForAudioClassification''',
'''ASTModel''',
'''ASTPreTrainedModel''',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Tuple = ['''ASTFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
lowerCamelCase :Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 667 | 0 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def __lowercase ( a__ ) -> int:
if (
(cp >= 0x4_E00 and cp <= 0x9_FFF)
or (cp >= 0x3_400 and cp <= 0x4_DBF) #
or (cp >= 0x20_000 and cp <= 0x2A_6DF) #
or (cp >= 0x2A_700 and cp <= 0x2B_73F) #
or (cp >= 0x2B_740 and cp <= 0x2B_81F) #
or (cp >= 0x2B_820 and cp <= 0x2C_EAF) #
or (cp >= 0xF_900 and cp <= 0xF_AFF)
or (cp >= 0x2F_800 and cp <= 0x2F_A1F) #
): #
return True
return False
def __lowercase ( a__ ) -> List[Any]:
for char in word:
__SCREAMING_SNAKE_CASE = ord(lowerCamelCase__ )
if not _is_chinese_char(lowerCamelCase__ ):
return 0
return 1
def __lowercase ( a__ ) -> Tuple:
__SCREAMING_SNAKE_CASE = set()
for token in tokens:
__SCREAMING_SNAKE_CASE = len(lowerCamelCase__ ) > 1 and is_chinese(lowerCamelCase__ )
if chinese_word:
word_set.add(lowerCamelCase__ )
__SCREAMING_SNAKE_CASE = list(lowerCamelCase__ )
return word_list
def __lowercase ( a__ , a__ ) -> Optional[Any]:
if not chinese_word_set:
return bert_tokens
__SCREAMING_SNAKE_CASE = max([len(lowerCamelCase__ ) for w in chinese_word_set] )
__SCREAMING_SNAKE_CASE = bert_tokens
    __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 0, len(lowerCamelCase__ )
while start < end:
__SCREAMING_SNAKE_CASE = True
if is_chinese(bert_word[start] ):
__SCREAMING_SNAKE_CASE = min(end - start , lowerCamelCase__ )
for i in range(lowerCamelCase__ , 1 , -1 ):
__SCREAMING_SNAKE_CASE = """""".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
__SCREAMING_SNAKE_CASE = """##""" + bert_word[j]
__SCREAMING_SNAKE_CASE = start + i
__SCREAMING_SNAKE_CASE = False
break
if single_word:
start += 1
return bert_word
def __lowercase ( a__ , a__ , a__ ) -> int:
__SCREAMING_SNAKE_CASE = []
for i in range(0 , len(lowerCamelCase__ ) , 1_00 ):
__SCREAMING_SNAKE_CASE = ltp_tokenizer.pipeline(lines[i : i + 1_00] , tasks=['cws'] ).cws
__SCREAMING_SNAKE_CASE = [get_chinese_word(lowerCamelCase__ ) for r in res]
ltp_res.extend(lowerCamelCase__ )
assert len(lowerCamelCase__ ) == len(lowerCamelCase__ )
__SCREAMING_SNAKE_CASE = []
for i in range(0 , len(lowerCamelCase__ ) , 1_00 ):
__SCREAMING_SNAKE_CASE = bert_tokenizer(lines[i : i + 1_00] , add_special_tokens=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=5_12 )
bert_res.extend(res['input_ids'] )
assert len(lowerCamelCase__ ) == len(lowerCamelCase__ )
__SCREAMING_SNAKE_CASE = []
for input_ids, chinese_word in zip(lowerCamelCase__ , lowerCamelCase__ ):
__SCREAMING_SNAKE_CASE = []
for id in input_ids:
__SCREAMING_SNAKE_CASE = bert_tokenizer._convert_id_to_token(lowerCamelCase__ )
input_tokens.append(lowerCamelCase__ )
__SCREAMING_SNAKE_CASE = add_sub_symbol(lowerCamelCase__ , lowerCamelCase__ )
__SCREAMING_SNAKE_CASE = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(lowerCamelCase__ ):
if token[:2] == "##":
__SCREAMING_SNAKE_CASE = token[2:]
# save chinese tokens' pos
if len(lowerCamelCase__ ) == 1 and _is_chinese_char(ord(lowerCamelCase__ ) ):
ref_id.append(lowerCamelCase__ )
ref_ids.append(lowerCamelCase__ )
assert len(lowerCamelCase__ ) == len(lowerCamelCase__ )
return ref_ids
def __lowercase ( a__ ) -> int:
with open(args.file_name , 'r' , encoding='utf-8' ) as f:
__SCREAMING_SNAKE_CASE = f.readlines()
__SCREAMING_SNAKE_CASE = [line.strip() for line in data if len(lowerCamelCase__ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
__SCREAMING_SNAKE_CASE = LTP(args.ltp ) # faster in GPU device
__SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained(args.bert )
__SCREAMING_SNAKE_CASE = prepare_ref(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
with open(args.save_path , 'w' , encoding='utf-8' ) as f:
__SCREAMING_SNAKE_CASE = [json.dumps(lowerCamelCase__ ) + """\n""" for ref in ref_ids]
f.writelines(lowerCamelCase__ )
if __name__ == "__main__":
lowerCAmelCase__ : Any =argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
required=False,
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''',
required=False,
type=str,
default='''./resources/ltp''',
help='''resources for LTP tokenizer, usually a path''',
)
parser.add_argument(
'''--bert''',
required=False,
type=str,
default='''./resources/robert''',
help='''resources for Bert tokenizer''',
)
parser.add_argument(
'''--save_path''',
required=False,
type=str,
default='''./resources/ref.txt''',
help='''path to save res''',
)
lowerCAmelCase__ : Optional[Any] =parser.parse_args()
main(args)
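# Toy illustration of the "##" marking performed by add_sub_symbol above (the name is
# taken from the call sites; this is a simplified, self-contained restatement with
# readable names, minus the is_chinese gate):
def mark_subwords(chars, words):
    out, start, max_len = list(chars), 0, max(map(len, words))
    while start < len(out):
        step = 1
        for i in range(min(len(out) - start, max_len), 1, -1):
            if "".join(out[start : start + i]) in words:
                for j in range(start + 1, start + i):
                    out[j] = "##" + out[j]  # continuation pieces of one LTP word
                step = i
                break
        start += step
    return out

# mark_subwords("波士顿是个好地方", {"波士顿", "地方"})
# -> ['波', '##士', '##顿', '是', '个', '好', '地', '##方']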
| 148 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : List[str] = ['image_processor', 'tokenizer']
__SCREAMING_SNAKE_CASE : Any = 'LayoutLMv3ImageProcessor'
__SCREAMING_SNAKE_CASE : Any = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__(self , lowercase=None , lowercase=None , **lowercase ):
A_ : Dict = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , lowercase , )
A_ : List[str] = kwargs.pop("""feature_extractor""" )
A_ : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(lowercase , lowercase )
def __call__(self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = True , lowercase = False , lowercase = None , lowercase = None , lowercase = 0 , lowercase = None , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = True , lowercase = None , **lowercase , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"""You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.""" )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"""You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" )
# first, apply the image processor
A_ : Optional[int] = self.image_processor(images=lowercase , return_tensors=lowercase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(lowercase , lowercase ):
A_ : Union[str, Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
A_ : Dict = features["""words"""]
A_ : Optional[int] = self.tokenizer(
text=text if text is not None else features["""words"""] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["""boxes"""] , word_labels=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_token_type_ids=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , )
# add pixel values
A_ : List[Any] = features.pop("""pixel_values""" )
if return_overflowing_tokens is True:
A_ : List[str] = self.get_overflowing_images(lowercase , encoded_inputs["""overflow_to_sample_mapping"""] )
A_ : Optional[int] = images
return encoded_inputs
def _a (self , lowercase , lowercase ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
A_ : str = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(lowercase ) != len(lowercase ):
raise ValueError(
"""Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"""
F' {len(lowercase )} and {len(lowercase )}' )
return images_with_overflow
def _a (self , *lowercase , **lowercase ):
return self.tokenizer.batch_decode(*lowercase , **lowercase )
def _a (self , *lowercase , **lowercase ):
return self.tokenizer.decode(*lowercase , **lowercase )
@property
def _a (self ):
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def _a (self ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowercase , )
return self.image_processor_class
@property
def _a (self ):
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , lowercase , )
return self.image_processor | 667 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCamelCase = {'''configuration_vit_msn''': ['''VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMSNConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'''VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMSNModel''',
'''ViTMSNForImageClassification''',
'''ViTMSNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 204 |
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
lowerCamelCase :Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(__UpperCAmelCase )
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , **lowercase ):
super().__init__(**lowercase )
requires_backends(self , """vision""" )
requires_backends(self , """torch""" )
if self.framework != "pt":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
self.check_model_type(lowercase )
def _a (self , **lowercase ):
A_ : str = {}
A_ : Dict = {}
A_ : str = {}
# preprocess args
if "points_per_batch" in kwargs:
A_ : Dict = kwargs["""points_per_batch"""]
if "points_per_crop" in kwargs:
A_ : int = kwargs["""points_per_crop"""]
if "crops_n_layers" in kwargs:
A_ : str = kwargs["""crops_n_layers"""]
if "crop_overlap_ratio" in kwargs:
A_ : int = kwargs["""crop_overlap_ratio"""]
if "crop_n_points_downscale_factor" in kwargs:
A_ : Tuple = kwargs["""crop_n_points_downscale_factor"""]
# postprocess args
if "pred_iou_thresh" in kwargs:
A_ : Any = kwargs["""pred_iou_thresh"""]
if "stability_score_offset" in kwargs:
A_ : Optional[int] = kwargs["""stability_score_offset"""]
if "mask_threshold" in kwargs:
A_ : Union[str, Any] = kwargs["""mask_threshold"""]
if "stability_score_thresh" in kwargs:
A_ : List[str] = kwargs["""stability_score_thresh"""]
if "crops_nms_thresh" in kwargs:
A_ : Union[str, Any] = kwargs["""crops_nms_thresh"""]
if "output_rle_mask" in kwargs:
A_ : List[Any] = kwargs["""output_rle_mask"""]
if "output_bboxes_mask" in kwargs:
A_ : Union[str, Any] = kwargs["""output_bboxes_mask"""]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__(self , lowercase , *lowercase , lowercase=None , lowercase=None , **lowercase ):
return super().__call__(lowercase , *lowercase , num_workers=lowercase , batch_size=lowercase , **lowercase )
def _a (self , lowercase , lowercase=64 , lowercase = 0 , lowercase = 512 / 1500 , lowercase = 32 , lowercase = 1 , ):
A_ : Tuple = load_image(lowercase )
A_ : int = self.image_processor.size["""longest_edge"""]
A_, A_, A_, A_ : str = self.image_processor.generate_crop_boxes(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
A_ : Dict = self.image_processor(images=lowercase , return_tensors="""pt""" )
with self.device_placement():
if self.framework == "pt":
A_ : Optional[Any] = self.get_inference_context()
with inference_context():
A_ : str = self._ensure_tensor_on_device(lowercase , device=self.device )
A_ : Tuple = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
A_ : Tuple = image_embeddings
A_ : Dict = grid_points.shape[1]
A_ : Optional[Any] = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"""Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
"""To return all points at once, set points_per_batch to None""" )
for i in range(0 , lowercase , lowercase ):
A_ : Tuple = grid_points[:, i : i + points_per_batch, :, :]
A_ : List[Any] = input_labels[:, i : i + points_per_batch]
A_ : Optional[Any] = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def _a (self , lowercase , lowercase=0.88 , lowercase=0.95 , lowercase=0 , lowercase=1 , ):
A_ : Any = model_inputs.pop("""input_boxes""" )
A_ : str = model_inputs.pop("""is_last""" )
A_ : int = model_inputs.pop("""original_sizes""" ).tolist()
A_ : int = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
A_ : List[str] = self.model(**lowercase )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
A_ : Optional[int] = model_outputs["""pred_masks"""]
A_ : Tuple = self.image_processor.post_process_masks(
lowercase , lowercase , lowercase , lowercase , binarize=lowercase )
A_ : Union[str, Any] = model_outputs["""iou_scores"""]
A_, A_, A_ : Tuple = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , lowercase , lowercase , lowercase , lowercase , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def _a (self , lowercase , lowercase=False , lowercase=False , lowercase=0.7 , ):
A_ : Tuple = []
A_ : Optional[Any] = []
A_ : str = []
for model_output in model_outputs:
all_scores.append(model_output.pop("""iou_scores""" ) )
all_masks.extend(model_output.pop("""masks""" ) )
all_boxes.append(model_output.pop("""boxes""" ) )
A_ : Any = torch.cat(lowercase )
A_ : List[Any] = torch.cat(lowercase )
A_, A_, A_, A_ : Optional[int] = self.image_processor.post_process_for_mask_generation(
lowercase , lowercase , lowercase , lowercase )
A_ : int = defaultdict(lowercase )
for output in model_outputs:
for k, v in output.items():
extra[k].append(lowercase )
A_ : Optional[int] = {}
if output_rle_mask:
A_ : List[str] = rle_mask
if output_bboxes_mask:
A_ : Optional[int] = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra} | 667 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase_ = {
'''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = ['''MobileViTFeatureExtractor''']
lowerCamelCase_ = ['''MobileViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_mobilevit"] = [
    "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
    "MobileViTForImageClassification",
    "MobileViTForSemanticSegmentation",
    "MobileViTModel",
    "MobileViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_mobilevit"] = [
    "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
    "TFMobileViTForImageClassification",
    "TFMobileViTForSemanticSegmentation",
    "TFMobileViTModel",
    "TFMobileViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 498 |
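# A quick illustration of what the `_import_structure` indirection buys: importing
# the package is cheap, and the torch-backed module is only loaded when one of its
# attributes is first touched. The module path is the standard transformers one.
import importlib

mobilevit = importlib.import_module("transformers.models.mobilevit")
model_cls = mobilevit.MobileViTForImageClassification  # triggers the lazy import
print(model_cls.__module__)  # transformers.models.mobilevit.modeling_mobilevit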
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def euler_modified(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    """Heun's (modified Euler) method: an explicit Euler predictor followed by a
    trapezoidal corrector, giving second-order accuracy."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])  # Euler predictor
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod() | 667 | 0 |
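# Sanity check for the integrator above: for y' = y with y(0) = 1 the exact
# solution is e**x, so the last sample should sit close to e at x = 1.
import math

def dy_dx(x, y):
    return y

ys = euler_modified(dy_dx, 1.0, 0.0, 0.01, 1.0)
print(ys[-1], math.e)  # both ~2.71828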
def manhattan_distance(point_a: list, point_b: list) -> float:
    """Compute the Manhattan (L1) distance between two n-dimensional points."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 625 |
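# Quick demo of the two equivalent implementations above: moving from (1, 1)
# to (9, 2) on a grid costs |1 - 9| + |1 - 2| = 9 axis-aligned steps.
print(manhattan_distance([1, 1], [9, 2]))            # 9.0
print(manhattan_distance_one_liner([1, 1], [9, 2]))  # 9.0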
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase :Union[str, Any] = logging.get_logger(__name__)
def get_mobilenet_va_config(model_name):
    config = MobileNetVaConfig(layer_norm_eps=0.001)
    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")
    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    """Load the COCO test image the conversion sanity check runs on."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_va_config(model_name)
    # Load 🤗 model
    model = MobileNetVaForImageClassification(config).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model, config, checkpoint_path)
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size}, size={"shortest_edge": config.image_size + 32}, )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits
    assert logits.shape == (1, 1001)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
lowerCamelCase :Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''mobilenet_v1_1.0_224''',
type=str,
help='''Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.''',
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCamelCase :str = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
) | 667 | 0 |
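# Hypothetical invocation of the conversion script above (the script filename and
# the checkpoint path are placeholders, not files shipped with this snippet):
#
#   python convert_mobilenet_v1_checkpoint.py \
#       --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_pt \
#       --push_to_hub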
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))])
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 73 |
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
lowerCamelCase :List[str] = logging.get_logger(__name__)
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Dict = 'AutoTokenizer'
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['tokenizer']
__SCREAMING_SNAKE_CASE : Tuple = {
'semantic_prompt': 1,
'coarse_prompt': 2,
'fine_prompt': 2,
}
def __init__(self , lowercase , lowercase=None ):
super().__init__(lowercase )
A_ : Any = speaker_embeddings
@classmethod
def _a (cls , lowercase , lowercase="speaker_embeddings_path.json" , **lowercase ):
if speaker_embeddings_dict_path is not None:
A_ : Any = get_file_from_repo(
lowercase , lowercase , subfolder=kwargs.pop("""subfolder""" , lowercase ) , cache_dir=kwargs.pop("""cache_dir""" , lowercase ) , force_download=kwargs.pop("""force_download""" , lowercase ) , proxies=kwargs.pop("""proxies""" , lowercase ) , resume_download=kwargs.pop("""resume_download""" , lowercase ) , local_files_only=kwargs.pop("""local_files_only""" , lowercase ) , use_auth_token=kwargs.pop("""use_auth_token""" , lowercase ) , revision=kwargs.pop("""revision""" , lowercase ) , )
if speaker_embeddings_path is None:
logger.warning(
F'`{os.path.join(lowercase , lowercase )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' )
A_ : str = None
else:
with open(lowercase ) as speaker_embeddings_json:
A_ : List[str] = json.load(lowercase )
else:
A_ : str = None
A_ : int = AutoTokenizer.from_pretrained(lowercase , **lowercase )
return cls(tokenizer=lowercase , speaker_embeddings=lowercase )
def _a (self , lowercase , lowercase="speaker_embeddings_path.json" , lowercase="speaker_embeddings" , lowercase = False , **lowercase , ):
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(lowercase , lowercase , """v2""" ) , exist_ok=lowercase )
A_ : Optional[int] = {}
A_ : Tuple = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
A_ : Union[str, Any] = self._load_voice_preset(lowercase )
A_ : Tuple = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["""repo_or_path"""] , lowercase , F'{prompt_key}_{key}' ) , voice_preset[key] , allow_pickle=lowercase , )
A_ : List[str] = os.path.join(lowercase , F'{prompt_key}_{key}.npy' )
A_ : str = tmp_dict
with open(os.path.join(lowercase , lowercase ) , """w""" ) as fp:
json.dump(lowercase , lowercase )
super().save_pretrained(lowercase , lowercase , **lowercase )
def _a (self , lowercase = None , **lowercase ):
A_ : List[Any] = self.speaker_embeddings[voice_preset]
A_ : Optional[Any] = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' )
A_ : int = get_file_from_repo(
self.speaker_embeddings.get("""repo_or_path""" , """/""" ) , voice_preset_paths[key] , subfolder=kwargs.pop("""subfolder""" , lowercase ) , cache_dir=kwargs.pop("""cache_dir""" , lowercase ) , force_download=kwargs.pop("""force_download""" , lowercase ) , proxies=kwargs.pop("""proxies""" , lowercase ) , resume_download=kwargs.pop("""resume_download""" , lowercase ) , local_files_only=kwargs.pop("""local_files_only""" , lowercase ) , use_auth_token=kwargs.pop("""use_auth_token""" , lowercase ) , revision=kwargs.pop("""revision""" , lowercase ) , )
if path is None:
raise ValueError(
F'`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.' )
A_ : Tuple = np.load(lowercase )
return voice_preset_dict
def _a (self , lowercase = None ):
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F'Voice preset unrecognized, missing {key} as a key.' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
def __call__(self , lowercase=None , lowercase=None , lowercase="pt" , lowercase=256 , lowercase=False , lowercase=True , lowercase=False , **lowercase , ):
if voice_preset is not None and not isinstance(lowercase , lowercase ):
if (
isinstance(lowercase , lowercase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
A_ : Optional[int] = self._load_voice_preset(lowercase )
else:
if isinstance(lowercase , lowercase ) and not voice_preset.endswith(""".npz""" ):
A_ : Optional[int] = voice_preset + """.npz"""
A_ : Any = np.load(lowercase )
if voice_preset is not None:
self._validate_voice_preset_dict(lowercase , **lowercase )
A_ : Optional[int] = BatchFeature(data=lowercase , tensor_type=lowercase )
A_ : Any = self.tokenizer(
lowercase , return_tensors=lowercase , padding="""max_length""" , max_length=lowercase , return_attention_mask=lowercase , return_token_type_ids=lowercase , add_special_tokens=lowercase , **lowercase , )
if voice_preset is not None:
A_ : Union[str, Any] = voice_preset
return encoded_text | 667 | 0 |
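# The processor above mirrors transformers' BarkProcessor; a hedged usage sketch
# (the checkpoint and voice-preset names are the commonly published ones, not
# anything this snippet itself guarantees):
from transformers import BarkProcessor

processor = BarkProcessor.from_pretrained("suno/bark-small")
inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
# The named preset resolves to the semantic/coarse/fine prompt arrays validated above.
print(inputs["input_ids"].shape)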
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class __SCREAMING_SNAKE_CASE( __UpperCAmelCase ):
_UpperCAmelCase = (IPNDMScheduler,)
_UpperCAmelCase = (('num_inference_steps', 5_0),)
def lowerCAmelCase_ ( self: Any , **UpperCamelCase: Union[str, Any] ) -> str:
snake_case__ = {"""num_train_timesteps""": 10_00}
config.update(**UpperCamelCase )
return config
def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: Tuple=0 , **UpperCamelCase: List[str] ) -> Optional[Any]:
snake_case__ = dict(self.forward_default_kwargs )
snake_case__ = kwargs.pop('num_inference_steps' , UpperCamelCase )
snake_case__ = self.dummy_sample
snake_case__ = 0.1 * sample
snake_case__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
snake_case__ = self.get_scheduler_config(**UpperCamelCase )
snake_case__ = scheduler_class(**UpperCamelCase )
scheduler.set_timesteps(UpperCamelCase )
# copy over dummy past residuals
snake_case__ = dummy_past_residuals[:]
if time_step is None:
snake_case__ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase )
snake_case__ = scheduler_class.from_pretrained(UpperCamelCase )
new_scheduler.set_timesteps(UpperCamelCase )
# copy over dummy past residuals
snake_case__ = dummy_past_residuals[:]
snake_case__ = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ).prev_sample
snake_case__ = new_scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
snake_case__ = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ).prev_sample
snake_case__ = new_scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def lowerCAmelCase_ ( self: Any ) -> List[str]:
pass
def lowerCAmelCase_ ( self: Any , UpperCamelCase: Optional[Any]=0 , **UpperCamelCase: Union[str, Any] ) -> int:
snake_case__ = dict(self.forward_default_kwargs )
snake_case__ = kwargs.pop('num_inference_steps' , UpperCamelCase )
snake_case__ = self.dummy_sample
snake_case__ = 0.1 * sample
snake_case__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**UpperCamelCase )
scheduler.set_timesteps(UpperCamelCase )
# copy over dummy past residuals (must be after setting timesteps)
snake_case__ = dummy_past_residuals[:]
if time_step is None:
snake_case__ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase )
snake_case__ = scheduler_class.from_pretrained(UpperCamelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCamelCase )
# copy over dummy past residual (must be after setting timesteps)
snake_case__ = dummy_past_residuals[:]
snake_case__ = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ).prev_sample
snake_case__ = new_scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
snake_case__ = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ).prev_sample
snake_case__ = new_scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def lowerCAmelCase_ ( self: int , **UpperCamelCase: Optional[int] ) -> List[str]:
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config(**UpperCamelCase )
snake_case__ = scheduler_class(**UpperCamelCase )
snake_case__ = 10
snake_case__ = self.dummy_model()
snake_case__ = self.dummy_sample_deter
scheduler.set_timesteps(UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
snake_case__ = model(UpperCamelCase , UpperCamelCase )
snake_case__ = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
snake_case__ = model(UpperCamelCase , UpperCamelCase )
snake_case__ = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase ).prev_sample
return sample
def lowerCAmelCase_ ( self: int ) -> str:
snake_case__ = dict(self.forward_default_kwargs )
snake_case__ = kwargs.pop('num_inference_steps' , UpperCamelCase )
for scheduler_class in self.scheduler_classes:
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**UpperCamelCase )
snake_case__ = self.dummy_sample
snake_case__ = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCamelCase , 'set_timesteps' ):
scheduler.set_timesteps(UpperCamelCase )
elif num_inference_steps is not None and not hasattr(UpperCamelCase , 'set_timesteps' ):
snake_case__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
snake_case__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
snake_case__ = dummy_past_residuals[:]
snake_case__ = scheduler.timesteps[5]
snake_case__ = scheduler.timesteps[6]
snake_case__ = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ).prev_sample
snake_case__ = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
snake_case__ = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ).prev_sample
snake_case__ = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowerCAmelCase_ ( self: str ) -> List[str]:
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=UpperCamelCase , time_step=UpperCamelCase )
def lowerCAmelCase_ ( self: Dict ) -> Optional[Any]:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ):
self.check_over_forward(num_inference_steps=UpperCamelCase , time_step=UpperCamelCase )
def lowerCAmelCase_ ( self: List[str] ) -> str:
snake_case__ = self.full_loop()
snake_case__ = torch.mean(torch.abs(UpperCamelCase ) )
assert abs(result_mean.item() - 2_54_05_29 ) < 10
| 328 |
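# For context, a minimal denoising loop with the scheduler under test; the model
# is a stand-in callable here, not a trained UNet.
import torch
from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(num_inference_steps=50)
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    residual = 0.1 * sample  # stand-in for model(sample, t)
    sample = scheduler.step(residual, t, sample).prev_sample
print(sample.shape)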
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : Union[str, Any] = tempfile.mkdtemp()
A_ : List[Any] = BlipImageProcessor()
A_ : Optional[int] = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
A_ : Any = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
A_ : Dict = InstructBlipProcessor(lowercase , lowercase , lowercase )
processor.save_pretrained(self.tmpdirname )
def _a (self , **lowercase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).tokenizer
def _a (self , **lowercase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).image_processor
def _a (self , **lowercase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).qformer_tokenizer
def _a (self ):
shutil.rmtree(self.tmpdirname )
def _a (self ):
A_ : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
A_ : Optional[Any] = [Image.fromarray(np.moveaxis(lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _a (self ):
A_ : str = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
A_ : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A_ : Optional[Any] = self.get_image_processor(do_normalize=lowercase , padding_value=1.0 )
A_ : str = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase )
self.assertIsInstance(processor.qformer_tokenizer , lowercase )
def _a (self ):
A_ : Any = self.get_image_processor()
A_ : Union[str, Any] = self.get_tokenizer()
A_ : List[str] = self.get_qformer_tokenizer()
A_ : int = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : List[Any] = self.prepare_image_inputs()
A_ : Union[str, Any] = image_processor(lowercase , return_tensors="""np""" )
A_ : Dict = processor(images=lowercase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a (self ):
A_ : List[Any] = self.get_image_processor()
A_ : Optional[Any] = self.get_tokenizer()
A_ : Any = self.get_qformer_tokenizer()
A_ : List[str] = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : str = """lower newer"""
A_ : List[Any] = processor(text=lowercase )
A_ : Optional[int] = tokenizer(lowercase , return_token_type_ids=lowercase )
A_ : List[Any] = qformer_tokenizer(lowercase , return_token_type_ids=lowercase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] )
def _a (self ):
A_ : int = self.get_image_processor()
A_ : Union[str, Any] = self.get_tokenizer()
A_ : Union[str, Any] = self.get_qformer_tokenizer()
A_ : Any = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : Optional[int] = """lower newer"""
A_ : Optional[int] = self.prepare_image_inputs()
A_ : Tuple = processor(text=lowercase , images=lowercase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
# test if it raises when no input is passed
with pytest.raises(lowercase ):
processor()
def _a (self ):
A_ : Dict = self.get_image_processor()
A_ : str = self.get_tokenizer()
A_ : Optional[int] = self.get_qformer_tokenizer()
A_ : int = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ : Optional[int] = processor.batch_decode(lowercase )
A_ : Dict = tokenizer.batch_decode(lowercase )
self.assertListEqual(lowercase , lowercase )
def _a (self ):
A_ : Any = self.get_image_processor()
A_ : Dict = self.get_tokenizer()
A_ : Union[str, Any] = self.get_qformer_tokenizer()
A_ : Optional[int] = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : List[Any] = """lower newer"""
A_ : Optional[Any] = self.prepare_image_inputs()
A_ : Any = processor(text=lowercase , images=lowercase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , ) | 667 | 0 |
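# A hedged end-to-end sketch of the processor the tests above exercise; the
# checkpoint name is the published Salesforce one, not part of this test file.
import requests
from PIL import Image
from transformers import InstructBlipProcessor

processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")
# Both the language-model ids and the Q-Former ids come back, as the tests assert.
print(sorted(inputs.keys()))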
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : List[Any] ):
_A = {
"""task_specific_params""": {
"""summarization""": {"""length_penalty""": 1.0, """max_length""": 128, """min_length""": 12, """num_beams""": 4},
"""summarization_cnn""": {"""length_penalty""": 2.0, """max_length""": 142, """min_length""": 56, """num_beams""": 4},
"""summarization_xsum""": {"""length_penalty""": 1.0, """max_length""": 62, """min_length""": 11, """num_beams""": 6},
}
}
_A = {
"""task_specific_params.summarization.length_penalty""": 1.0,
"""task_specific_params.summarization.max_length""": 128,
"""task_specific_params.summarization.min_length""": 12,
"""task_specific_params.summarization.num_beams""": 4,
"""task_specific_params.summarization_cnn.length_penalty""": 2.0,
"""task_specific_params.summarization_cnn.max_length""": 142,
"""task_specific_params.summarization_cnn.min_length""": 56,
"""task_specific_params.summarization_cnn.num_beams""": 4,
"""task_specific_params.summarization_xsum.length_penalty""": 1.0,
"""task_specific_params.summarization_xsum.max_length""": 62,
"""task_specific_params.summarization_xsum.min_length""": 11,
"""task_specific_params.summarization_xsum.num_beams""": 6,
}
self.assertEqual(flatten_dict(_UpperCAmelCase ) , _UpperCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] ):
_A = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(_UpperCAmelCase ) , x.transpose() ) )
_A = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(_UpperCAmelCase , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def lowerCAmelCase_ ( self : int ):
_A = np.random.randn(3 , 4 )
_A = torch.tensor(_UpperCAmelCase )
self.assertTrue(np.allclose(transpose(_UpperCAmelCase ) , transpose(_UpperCAmelCase ).numpy() ) )
_A = np.random.randn(3 , 4 , 5 )
_A = torch.tensor(_UpperCAmelCase )
self.assertTrue(np.allclose(transpose(_UpperCAmelCase , axes=(1, 2, 0) ) , transpose(_UpperCAmelCase , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def lowerCAmelCase_ ( self : int ):
_A = np.random.randn(3 , 4 )
_A = tf.constant(_UpperCAmelCase )
self.assertTrue(np.allclose(transpose(_UpperCAmelCase ) , transpose(_UpperCAmelCase ).numpy() ) )
_A = np.random.randn(3 , 4 , 5 )
_A = tf.constant(_UpperCAmelCase )
self.assertTrue(np.allclose(transpose(_UpperCAmelCase , axes=(1, 2, 0) ) , transpose(_UpperCAmelCase , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def lowerCAmelCase_ ( self : Union[str, Any] ):
_A = np.random.randn(3 , 4 )
_A = jnp.array(_UpperCAmelCase )
self.assertTrue(np.allclose(transpose(_UpperCAmelCase ) , np.asarray(transpose(_UpperCAmelCase ) ) ) )
_A = np.random.randn(3 , 4 , 5 )
_A = jnp.array(_UpperCAmelCase )
self.assertTrue(np.allclose(transpose(_UpperCAmelCase , axes=(1, 2, 0) ) , np.asarray(transpose(_UpperCAmelCase , axes=(1, 2, 0) ) ) ) )
def lowerCAmelCase_ ( self : Union[str, Any] ):
_A = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(_UpperCAmelCase , (4, 3) ) , np.reshape(_UpperCAmelCase , (4, 3) ) ) )
_A = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(_UpperCAmelCase , (12, 5) ) , np.reshape(_UpperCAmelCase , (12, 5) ) ) )
@require_torch
def lowerCAmelCase_ ( self : Union[str, Any] ):
_A = np.random.randn(3 , 4 )
_A = torch.tensor(_UpperCAmelCase )
self.assertTrue(np.allclose(reshape(_UpperCAmelCase , (4, 3) ) , reshape(_UpperCAmelCase , (4, 3) ).numpy() ) )
_A = np.random.randn(3 , 4 , 5 )
_A = torch.tensor(_UpperCAmelCase )
self.assertTrue(np.allclose(reshape(_UpperCAmelCase , (12, 5) ) , reshape(_UpperCAmelCase , (12, 5) ).numpy() ) )
@require_tf
def lowerCAmelCase_ ( self : Dict ):
_A = np.random.randn(3 , 4 )
_A = tf.constant(_UpperCAmelCase )
self.assertTrue(np.allclose(reshape(_UpperCAmelCase , (4, 3) ) , reshape(_UpperCAmelCase , (4, 3) ).numpy() ) )
_A = np.random.randn(3 , 4 , 5 )
_A = tf.constant(_UpperCAmelCase )
self.assertTrue(np.allclose(reshape(_UpperCAmelCase , (12, 5) ) , reshape(_UpperCAmelCase , (12, 5) ).numpy() ) )
@require_flax
def lowerCAmelCase_ ( self : Dict ):
_A = np.random.randn(3 , 4 )
_A = jnp.array(_UpperCAmelCase )
self.assertTrue(np.allclose(reshape(_UpperCAmelCase , (4, 3) ) , np.asarray(reshape(_UpperCAmelCase , (4, 3) ) ) ) )
_A = np.random.randn(3 , 4 , 5 )
_A = jnp.array(_UpperCAmelCase )
self.assertTrue(np.allclose(reshape(_UpperCAmelCase , (12, 5) ) , np.asarray(reshape(_UpperCAmelCase , (12, 5) ) ) ) )
def lowerCAmelCase_ ( self : Optional[Any] ):
_A = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(_UpperCAmelCase ) , np.squeeze(_UpperCAmelCase ) ) )
_A = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(_UpperCAmelCase , axis=2 ) , np.squeeze(_UpperCAmelCase , axis=2 ) ) )
@require_torch
def lowerCAmelCase_ ( self : int ):
_A = np.random.randn(1 , 3 , 4 )
_A = torch.tensor(_UpperCAmelCase )
self.assertTrue(np.allclose(squeeze(_UpperCAmelCase ) , squeeze(_UpperCAmelCase ).numpy() ) )
_A = np.random.randn(1 , 4 , 1 , 5 )
_A = torch.tensor(_UpperCAmelCase )
self.assertTrue(np.allclose(squeeze(_UpperCAmelCase , axis=2 ) , squeeze(_UpperCAmelCase , axis=2 ).numpy() ) )
@require_tf
def lowerCAmelCase_ ( self : str ):
_A = np.random.randn(1 , 3 , 4 )
_A = tf.constant(_UpperCAmelCase )
self.assertTrue(np.allclose(squeeze(_UpperCAmelCase ) , squeeze(_UpperCAmelCase ).numpy() ) )
_A = np.random.randn(1 , 4 , 1 , 5 )
_A = tf.constant(_UpperCAmelCase )
self.assertTrue(np.allclose(squeeze(_UpperCAmelCase , axis=2 ) , squeeze(_UpperCAmelCase , axis=2 ).numpy() ) )
@require_flax
def lowerCAmelCase_ ( self : List[Any] ):
_A = np.random.randn(1 , 3 , 4 )
_A = jnp.array(_UpperCAmelCase )
self.assertTrue(np.allclose(squeeze(_UpperCAmelCase ) , np.asarray(squeeze(_UpperCAmelCase ) ) ) )
_A = np.random.randn(1 , 4 , 1 , 5 )
_A = jnp.array(_UpperCAmelCase )
self.assertTrue(np.allclose(squeeze(_UpperCAmelCase , axis=2 ) , np.asarray(squeeze(_UpperCAmelCase , axis=2 ) ) ) )
def lowerCAmelCase_ ( self : List[str] ):
_A = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(_UpperCAmelCase , axis=1 ) , np.expand_dims(_UpperCAmelCase , axis=1 ) ) )
@require_torch
def lowerCAmelCase_ ( self : List[str] ):
_A = np.random.randn(3 , 4 )
_A = torch.tensor(_UpperCAmelCase )
self.assertTrue(np.allclose(expand_dims(_UpperCAmelCase , axis=1 ) , expand_dims(_UpperCAmelCase , axis=1 ).numpy() ) )
@require_tf
def lowerCAmelCase_ ( self : Union[str, Any] ):
_A = np.random.randn(3 , 4 )
_A = tf.constant(_UpperCAmelCase )
self.assertTrue(np.allclose(expand_dims(_UpperCAmelCase , axis=1 ) , expand_dims(_UpperCAmelCase , axis=1 ).numpy() ) )
@require_flax
def lowerCAmelCase_ ( self : Optional[int] ):
_A = np.random.randn(3 , 4 )
_A = jnp.array(_UpperCAmelCase )
self.assertTrue(np.allclose(expand_dims(_UpperCAmelCase , axis=1 ) , np.asarray(expand_dims(_UpperCAmelCase , axis=1 ) ) ) )
| 7 |
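# The helpers under test dispatch on the input's framework, so one call site works
# for numpy, torch, TensorFlow and JAX alike (availability depends on the env):
import numpy as np
import torch
from transformers.utils import reshape, transpose

x_np = np.random.randn(3, 4)
x_pt = torch.tensor(x_np)
print(transpose(x_np).shape, transpose(x_pt).shape)            # (4, 3) in both frameworks
print(reshape(x_np, (12,)).shape, reshape(x_pt, (12,)).shape)  # (12,) in both frameworks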
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase :Optional[Any] = logging.get_logger(__name__)
lowerCamelCase :Tuple = {
'''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(self, image_size=[32, 128], patch_size=4, num_channels=3, max_token_length=27, num_character_labels=38, num_bpe_labels=50257, num_wordpiece_labels=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, mlp_ratio=4.0, qkv_bias=True, distilled=False, layer_norm_eps=1e-5, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, output_aa_attentions=False, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_aa_attentions = output_aa_attentions
        self.initializer_range = initializer_range | 667 | 0 |
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class a__ ( __UpperCAmelCase ):
def __init__( self : str ,a__ : Optional[Any] ,a__ : str=13 ,a__ : Optional[Any]=7 ,a__ : List[Any]=True ,a__ : int=True ,a__ : Any=True ,a__ : int=True ,a__ : List[Any]=99 ,a__ : Optional[int]=32 ,a__ : str=5 ,a__ : Dict=4 ,a__ : str=37 ,a__ : str="gelu" ,a__ : str=0.1 ,a__ : List[str]=0.1 ,a__ : int=512 ,a__ : Optional[int]=16 ,a__ : Dict=2 ,a__ : Optional[Any]=0.02 ,a__ : Optional[Any]=False ,a__ : Optional[Any]=True ,a__ : Optional[int]="None" ,a__ : List[str]=3 ,a__ : str=4 ,a__ : Dict=None ,) -> List[str]:
"""simple docstring"""
_lowerCAmelCase:Optional[int] = parent
_lowerCAmelCase:int = batch_size
_lowerCAmelCase:Union[str, Any] = seq_length
_lowerCAmelCase:str = is_training
_lowerCAmelCase:Dict = use_input_mask
_lowerCAmelCase:Dict = use_token_type_ids
_lowerCAmelCase:Dict = use_labels
_lowerCAmelCase:List[str] = vocab_size
_lowerCAmelCase:int = hidden_size
_lowerCAmelCase:Tuple = num_hidden_layers
_lowerCAmelCase:int = num_attention_heads
_lowerCAmelCase:Optional[Any] = intermediate_size
_lowerCAmelCase:Any = hidden_act
_lowerCAmelCase:List[Any] = hidden_dropout_prob
_lowerCAmelCase:Any = attention_probs_dropout_prob
_lowerCAmelCase:int = max_position_embeddings
_lowerCAmelCase:Optional[Any] = type_vocab_size
_lowerCAmelCase:Optional[int] = type_sequence_label_size
_lowerCAmelCase:str = initializer_range
_lowerCAmelCase:Union[str, Any] = num_labels
_lowerCAmelCase:Union[str, Any] = num_choices
_lowerCAmelCase:Dict = relative_attention
_lowerCAmelCase:Tuple = position_biased_input
_lowerCAmelCase:str = pos_att_type
_lowerCAmelCase:Union[str, Any] = scope
def __UpperCamelCase ( self : int) -> Any:
"""simple docstring"""
_lowerCAmelCase:List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size)
_lowerCAmelCase:Optional[int] = None
if self.use_input_mask:
_lowerCAmelCase:str = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2)
_lowerCAmelCase:List[str] = None
if self.use_token_type_ids:
_lowerCAmelCase:List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size)
_lowerCAmelCase:Optional[Any] = None
_lowerCAmelCase:Union[str, Any] = None
_lowerCAmelCase:Dict = None
if self.use_labels:
_lowerCAmelCase:List[str] = ids_tensor([self.batch_size] ,self.type_sequence_label_size)
_lowerCAmelCase:Any = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels)
_lowerCAmelCase:Optional[Any] = ids_tensor([self.batch_size] ,self.num_choices)
_lowerCAmelCase:Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : str) -> Tuple:
"""simple docstring"""
return DebertaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,relative_attention=self.relative_attention ,position_biased_input=self.position_biased_input ,pos_att_type=self.pos_att_type ,)
def __UpperCamelCase ( self : int) -> Optional[int]:
"""simple docstring"""
_lowerCAmelCase:List[Any] = self.get_config()
_lowerCAmelCase:Any = 300
return config
def __UpperCamelCase ( self : int ,a__ : str) -> Optional[Any]:
"""simple docstring"""
self.parent.assertListEqual(list(result.loss.size()) ,[])
def __UpperCamelCase ( self : str ,a__ : Union[str, Any] ,a__ : Optional[int] ,a__ : Union[str, Any] ,a__ : int ,a__ : Tuple ,a__ : Dict ,a__ : List[Any]) -> Tuple:
"""simple docstring"""
_lowerCAmelCase:Optional[Any] = DebertaModel(config=a__)
model.to(a__)
model.eval()
_lowerCAmelCase:int = model(a__ ,attention_mask=a__ ,token_type_ids=a__)[0]
_lowerCAmelCase:int = model(a__ ,token_type_ids=a__)[0]
_lowerCAmelCase:str = model(a__)[0]
self.parent.assertListEqual(list(sequence_output.size()) ,[self.batch_size, self.seq_length, self.hidden_size])
def __UpperCamelCase ( self : Union[str, Any] ,a__ : Optional[int] ,a__ : Optional[int] ,a__ : List[Any] ,a__ : Optional[int] ,a__ : Optional[Any] ,a__ : Dict ,a__ : List[Any]) -> Optional[Any]:
"""simple docstring"""
_lowerCAmelCase:Any = DebertaForMaskedLM(config=a__)
model.to(a__)
model.eval()
_lowerCAmelCase:Dict = model(a__ ,attention_mask=a__ ,token_type_ids=a__ ,labels=a__)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size))
def __UpperCamelCase ( self : List[Any] ,a__ : Tuple ,a__ : str ,a__ : Optional[Any] ,a__ : Union[str, Any] ,a__ : Dict ,a__ : Dict ,a__ : str) -> Optional[Any]:
"""simple docstring"""
_lowerCAmelCase:Union[str, Any] = self.num_labels
_lowerCAmelCase:Optional[int] = DebertaForSequenceClassification(a__)
model.to(a__)
model.eval()
_lowerCAmelCase:List[Any] = model(a__ ,attention_mask=a__ ,token_type_ids=a__ ,labels=a__)
self.parent.assertListEqual(list(result.logits.size()) ,[self.batch_size, self.num_labels])
self.check_loss_output(a__)
def __UpperCamelCase ( self : Union[str, Any] ,a__ : Optional[Any] ,a__ : Tuple ,a__ : List[str] ,a__ : Any ,a__ : Dict ,a__ : Tuple ,a__ : int) -> Any:
"""simple docstring"""
_lowerCAmelCase:Optional[Any] = self.num_labels
_lowerCAmelCase:List[str] = DebertaForTokenClassification(config=a__)
model.to(a__)
model.eval()
_lowerCAmelCase:Tuple = model(a__ ,attention_mask=a__ ,token_type_ids=a__ ,labels=a__)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels))
def __UpperCamelCase ( self : Optional[int] ,a__ : List[Any] ,a__ : List[str] ,a__ : str ,a__ : Any ,a__ : Union[str, Any] ,a__ : Dict ,a__ : Optional[Any]) -> str:
"""simple docstring"""
_lowerCAmelCase:Union[str, Any] = DebertaForQuestionAnswering(config=a__)
model.to(a__)
model.eval()
_lowerCAmelCase:str = model(
a__ ,attention_mask=a__ ,token_type_ids=a__ ,start_positions=a__ ,end_positions=a__ ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length))
def __UpperCamelCase ( self : Optional[Any]) -> Optional[Any]:
"""simple docstring"""
_lowerCAmelCase:Dict = self.prepare_config_and_inputs()
(
_lowerCAmelCase
):Dict = config_and_inputs
_lowerCAmelCase:Optional[int] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class a__ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
snake_case__ = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
snake_case__ = (
{
'feature-extraction': DebertaModel,
'fill-mask': DebertaForMaskedLM,
'question-answering': DebertaForQuestionAnswering,
'text-classification': DebertaForSequenceClassification,
'token-classification': DebertaForTokenClassification,
'zero-shot': DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ = True
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
def __UpperCamelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
_lowerCAmelCase:Optional[Any] = DebertaModelTester(self)
_lowerCAmelCase:int = ConfigTester(self ,config_class=a__ ,hidden_size=37)
def __UpperCamelCase ( self : Optional[int]) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : str) -> List[Any]:
"""simple docstring"""
_lowerCAmelCase:Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*a__)
def __UpperCamelCase ( self : str) -> Dict:
"""simple docstring"""
_lowerCAmelCase:Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*a__)
def __UpperCamelCase ( self : int) -> int:
"""simple docstring"""
_lowerCAmelCase:Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*a__)
def __UpperCamelCase ( self : Optional[int]) -> List[Any]:
"""simple docstring"""
_lowerCAmelCase:Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*a__)
def __UpperCamelCase ( self : List[Any]) -> List[str]:
"""simple docstring"""
_lowerCAmelCase:Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*a__)
@slow
def __UpperCamelCase ( self : List[Any]) -> Optional[int]:
"""simple docstring"""
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase:str = DebertaModel.from_pretrained(a__)
self.assertIsNotNone(a__)
@require_torch
@require_sentencepiece
@require_tokenizers
class a__ ( unittest.TestCase ):
@unittest.skip(reason='''Model not available yet''')
def __UpperCamelCase ( self : Optional[Any]) -> Optional[Any]:
"""simple docstring"""
pass
@slow
def __UpperCamelCase ( self : List[Any]) -> Optional[Any]:
"""simple docstring"""
_lowerCAmelCase:Tuple = DebertaModel.from_pretrained('''microsoft/deberta-base''')
_lowerCAmelCase:int = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]])
_lowerCAmelCase:Optional[int] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
with torch.no_grad():
_lowerCAmelCase:Union[str, Any] = model(a__ ,attention_mask=a__)[0]
# compare the actual values for a slice.
_lowerCAmelCase:int = torch.tensor(
[[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]])
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,a__ ,atol=1E-4) ,F'{output[:, 1:4, 1:4]}')
| 227 |
'''simple docstring'''
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of ``function`` with the secant method, starting from x0 and x1."""
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5)) | 667 | 0 |
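# The classic test case: x**3 - 2x - 5 has a single real root near 2.0945515, and
# the secant iteration above converges to it from the starting pair (3, 3.5).
root = intersection(f, 3, 3.5)
assert abs(f(root)) < 1e-3
print(root)  # ~2.0945515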
'''simple docstring'''
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
a : Any = '''__DUMMY_TRANSFORMERS_USER__'''
a : Optional[Any] = '''Dummy User'''
a : List[str] = '''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'''
a : Union[str, Any] = '''https://hub-ci.huggingface.co'''
a : Optional[Any] = CI_HUB_ENDPOINT + '''/datasets/{repo_id}/resolve/{revision}/{path}'''
a : Dict = CI_HUB_ENDPOINT + '''/{repo_id}/resolve/{revision}/{filename}'''
a : Tuple = Path('~/.huggingface/hub_ci_token').expanduser()
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
monkeypatch.setattr(
'''huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE''', lowerCamelCase__ )
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
monkeypatch.setattr('''datasets.config.HF_ENDPOINT''', lowerCamelCase__ )
monkeypatch.setattr('''datasets.config.HUB_DATASETS_URL''', lowerCamelCase__ )
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
monkeypatch.setattr('''huggingface_hub.hf_api.HfFolder.path_token''', lowerCamelCase__ )
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> Dict:
'''simple docstring'''
HfFolder.save_token(lowerCamelCase__ )
yield
HfFolder.delete_token()
@pytest.fixture(scope='''session''' )
def __magic_name__ ( ) -> Optional[Any]:
'''simple docstring'''
return HfApi(endpoint=lowerCamelCase__ )
@pytest.fixture(scope='''session''' )
def __magic_name__ ( __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
snake_case_ = HfFolder.get_token()
HfFolder.save_token(lowerCamelCase__ )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(lowerCamelCase__ )
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
def _cleanup_repo(__UpperCAmelCase ):
hf_api.delete_repo(lowerCamelCase__, token=lowerCamelCase__, repo_type='''dataset''' )
return _cleanup_repo
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
@contextmanager
def _temporary_repo(__UpperCAmelCase ):
try:
yield repo_id
finally:
cleanup_repo(lowerCamelCase__ )
return _temporary_repo
@pytest.fixture(scope='''session''' )
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
snake_case_ = F"repo_txt_data-{int(time.time() * 10e3 )}"
snake_case_ = F"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(lowerCamelCase__, token=lowerCamelCase__, repo_type='''dataset''', private=lowerCamelCase__ )
hf_api.upload_file(
token=lowerCamelCase__, path_or_fileobj=str(lowerCamelCase__ ), path_in_repo='''data/text_data.txt''', repo_id=lowerCamelCase__, repo_type='''dataset''', )
yield repo_id
try:
hf_api.delete_repo(lowerCamelCase__, token=lowerCamelCase__, repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> str:
'''simple docstring'''
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope='''session''' )
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ = F"repo_zipped_txt_data-{int(time.time() * 10e3 )}"
snake_case_ = F"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(lowerCamelCase__, token=lowerCamelCase__, repo_type='''dataset''', private=lowerCamelCase__ )
hf_api.upload_file(
token=lowerCamelCase__, path_or_fileobj=str(lowerCamelCase__ ), path_in_repo='''data.zip''', repo_id=lowerCamelCase__, repo_type='''dataset''', )
yield repo_id
try:
hf_api.delete_repo(lowerCamelCase__, token=lowerCamelCase__, repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope='''session''' )
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> str:
'''simple docstring'''
snake_case_ = F"repo_zipped_img_data-{int(time.time() * 10e3 )}"
snake_case_ = F"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(lowerCamelCase__, token=lowerCamelCase__, repo_type='''dataset''', private=lowerCamelCase__ )
hf_api.upload_file(
token=lowerCamelCase__, path_or_fileobj=str(lowerCamelCase__ ), path_in_repo='''data.zip''', repo_id=lowerCamelCase__, repo_type='''dataset''', )
yield repo_id
try:
hf_api.delete_repo(lowerCamelCase__, token=lowerCamelCase__, repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> str:
'''simple docstring'''
return hf_private_dataset_repo_zipped_img_data_
| 640 |
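# Sketch of a test consuming the fixtures above (illustrative only: it needs the
# CI hub to be reachable, and the test name/arguments are assumptions, not part
# of the real suite):
def test_load_private_text_dataset(hf_private_dataset_repo_txt_data, hf_token):
    from datasets import load_dataset

    ds = load_dataset(hf_private_dataset_repo_txt_data, data_files="data/text_data.txt", use_auth_token=hf_token)
    assert len(ds["train"]) > 0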
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCamelCase :Tuple = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['pixel_values']
def __init__(self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BICUBIC , lowercase = True , lowercase = None , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = None , lowercase = None , lowercase = True , **lowercase , ):
super().__init__(**lowercase )
A_ : Dict = size if size is not None else {"""shortest_edge""": 224}
A_ : List[str] = get_size_dict(lowercase , default_to_square=lowercase )
A_ : Tuple = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
A_ : Union[str, Any] = get_size_dict(lowercase , default_to_square=lowercase , param_name="""crop_size""" )
A_ : str = do_resize
A_ : str = size
A_ : List[str] = resample
A_ : Any = do_center_crop
A_ : Union[str, Any] = crop_size
A_ : List[Any] = do_rescale
A_ : List[Any] = rescale_factor
A_ : Dict = do_normalize
A_ : Optional[int] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A_ : Any = image_std if image_std is not None else OPENAI_CLIP_STD
A_ : Union[str, Any] = do_convert_rgb
def _a (self , lowercase , lowercase , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ):
A_ : Any = get_size_dict(lowercase , default_to_square=lowercase )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A_ : Optional[Any] = get_resize_output_image_size(lowercase , size=size["""shortest_edge"""] , default_to_square=lowercase )
return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ):
A_ : Any = get_size_dict(lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(lowercase , size=(size["""height"""], size["""width"""]) , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ):
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ):
return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase )
    def preprocess (self , images , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , do_convert_rgb = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , param_name="""size""" , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="""crop_size""" , default_to_square=True )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""" )
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data , tensor_type=return_tensors ) | 667 | 0 |
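# Illustrative usage sketch (not part of the original snippet; the constructor arguments
# shown are assumptions for a CLIP-style image processor like the class above):
#   processor = SomeImageProcessor(do_resize=True, size={"shortest_edge": 224})
#   batch = processor.preprocess(images, return_tensors="pt")
#   batch["pixel_values"]  # channel-first tensors ready for the vision model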
from __future__ import annotations
def _A ( number_of_bytes ,partitions ):
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!" )
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!" )
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions ):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}" )
    return allocation_list
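# Worked example (sketch added for illustration): splitting 16 bytes across 4 partitions
# yields four equal ranges, with the last partition absorbing any remainder:
#   _A(16, 4)  ->  ['1-4', '5-8', '9-12', '13-16']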
if __name__ == "__main__":
import doctest
doctest.testmod() | 113 |
'''simple docstring'''
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase , lowercase ):
A_ : List[str] = name
A_ : Dict = value
A_ : Optional[int] = weight
def __repr__(self ):
return F'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'
def _a (self ):
return self.value
def _a (self ):
return self.name
def _a (self ):
return self.weight
def _a (self ):
return self.value / self.weight
def build_menu ( name , value , weight ):
    '''simple docstring'''
    menu = []
    for i in range(len(name ) ):
        menu.append(Things(name[i] , value[i] , weight[i] ) )
    return menu
def greedy ( items , max_cost , key_func ):
    '''simple docstring'''
    items_copy = sorted(items , key=key_func , reverse=True )
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy ) ):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i] )
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
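# Hedged usage sketch (the sample menu below is an assumption, not from the original):
#   food = ["Burger", "Pizza", "Coca Cola"]; value = [80, 100, 60]; weight = [40, 10, 20]
#   foods = build_menu(food, value, weight)
#   greedy(foods, 500, Things.get_value)  # greedily picks items by value until max_cost is hit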
def a ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod() | 667 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Optional[Any] = logging.get_logger(__name__)
__A : Tuple = {
'''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class _UpperCAmelCase ( PretrainedConfig ):
SCREAMING_SNAKE_CASE_ : List[Any] = 'mgp-str'
    def __init__( self , image_size=[32, 1_28] , patch_size=4 , num_channels=3 , max_token_length=27 , num_character_labels=38 , num_bpe_labels=5_02_57 , num_wordpiece_labels=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , mlp_ratio=4.0 , qkv_bias=True , distilled=False , layer_norm_eps=1e-5 , drop_rate=0.0 , attn_drop_rate=0.0 , drop_path_rate=0.0 , output_aa_attentions=False , initializer_range=0.02 , **kwargs , ):
        super().__init__(**kwargs )
lowercase_ : int = image_size
lowercase_ : List[str] = patch_size
lowercase_ : Tuple = num_channels
lowercase_ : List[str] = max_token_length
lowercase_ : int = num_character_labels
lowercase_ : str = num_bpe_labels
lowercase_ : Tuple = num_wordpiece_labels
lowercase_ : Optional[int] = hidden_size
lowercase_ : List[Any] = num_hidden_layers
lowercase_ : int = num_attention_heads
lowercase_ : Tuple = mlp_ratio
lowercase_ : str = distilled
lowercase_ : Union[str, Any] = layer_norm_eps
lowercase_ : str = drop_rate
lowercase_ : int = qkv_bias
lowercase_ : Dict = attn_drop_rate
lowercase_ : List[Any] = drop_path_rate
lowercase_ : Any = output_aa_attentions
lowercase_ : Union[str, Any] = initializer_range
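# Minimal instantiation sketch (an illustration, not in the original file; the defaults
# above are meant to mirror the alibaba-damo/mgp-str-base checkpoint in the archive map):
#   config = _UpperCAmelCase(image_size=[32, 1_28], max_token_length=27)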
| 231 |
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)
name_width = 5_0 # max width of layer names
qname_width = 7_0 # max width of quantizer names
def add_arguments ( parser ):
    '''simple docstring'''
    group = parser.add_argument_group("""quant_trainer arguments""" )
    group.add_argument("""--wprec""" , type=int , default=8 , help="""weight precision""" )
    group.add_argument("""--aprec""" , type=int , default=8 , help="""activation precision""" )
    group.add_argument("""--quant-per-tensor""" , action="""store_true""" , help="""per tensor weight scaling""" )
    group.add_argument("""--quant-disable""" , action="""store_true""" , help="""disable all quantizers""" )
    group.add_argument("""--quant-disable-embeddings""" , action="""store_true""" , help="""disable all embeddings quantizers""" )
    group.add_argument("""--quant-disable-keyword""" , type=str , nargs="""+""" , help="""disable quantizers by keyword""" )
    group.add_argument("""--quant-disable-layer-module""" , type=str , help="""disable quantizers by keyword under layer.""" )
    group.add_argument("""--quant-enable-layer-module""" , type=str , help="""enable quantizers by keyword under layer""" )
    group.add_argument("""--calibrator""" , default="""max""" , help="""which quantization range calibrator to use""" )
    group.add_argument("""--percentile""" , default=None , type=float , help="""percentile for PercentileCalibrator""" )
    group.add_argument("""--fuse-qkv""" , action="""store_true""" , help="""use the same scale factor for qkv""" )
    group.add_argument("""--clip-gelu""" , metavar="""N""" , type=float , help="""clip gelu output maximum value to N""" )
    group.add_argument(
        """--recalibrate-weights""" , action="""store_true""" , help=(
            """recalibrate weight amaxes by taking the max of the weights."""
            """ amaxes will be computed with the current quantization granularity (axis)."""
        ) , )
def set_default_quantizers ( args ):
    '''simple docstring'''
    if args.calibrator == "max":
        calib_method = """max"""
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("""Specify --percentile when using percentile calibrator""" )
        calib_method = """histogram"""
    elif args.calibrator == "mse":
        calib_method = """histogram"""
    else:
        raise ValueError(f'Invalid calibrator {args.calibrator}' )
    input_desc = QuantDescriptor(num_bits=args.aprec , calib_method=calib_method )
    weight_desc = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc )
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc )
def configure_model ( model , args , calib=False , eval=False ):
    '''simple docstring'''
    logger.info("""Configuring Model for Quantization""" )
    logger.info(f'using quantization package {pytorch_quantization.__file__}' )
    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model , ["""embeddings"""] , which="""weight""" , _disabled=True )
        if args.quant_disable:
            set_quantizer_by_name(model , [""""""] , _disabled=True )
        if args.quant_disable_keyword:
            set_quantizer_by_name(model , args.quant_disable_keyword , _disabled=True )
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model , [r"""layer.\d+.""" + args.quant_disable_layer_module] , _disabled=True )
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model , [r"""layer.\d+.""" + args.quant_enable_layer_module] , _disabled=False )
        if args.recalibrate_weights:
            recalibrate_weights(model )
        if args.fuse_qkv:
            fuse_qkv(model , args )
        if args.clip_gelu:
            clip_gelu(model , args.clip_gelu )
    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model )
def enable_calibration ( model ):
'''simple docstring'''
logger.info("""Enabling Calibration""" )
for name, module in model.named_modules():
if name.endswith("""_quantizer""" ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(f'{name:80}: {module}' )
def finish_calibration ( model , args ):
'''simple docstring'''
logger.info("""Loading calibrated amax""" )
for name, module in model.named_modules():
if name.endswith("""_quantizer""" ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax("""percentile""" , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
    print_quant_summary(model )
def fuse_qkv ( model , args ):
    '''simple docstring'''
    def fusea(qq , qk , qv ):
        for mod in [qq, qk, qv]:
            if not hasattr(mod , """_amax""" ):
                print(""" WARNING: NO AMAX BUFFER""" )
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()
        amax = max(q , k , v )
        qq._amax.fill_(amax )
        qk._amax.fill_(amax )
        qv._amax.fill_(amax )
        logger.info(f' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}' )
for name, mod in model.named_modules():
if name.endswith(""".attention.self""" ):
logger.info(f'FUSE_QKV: {name:{name_width}}' )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def clip_gelu ( model , maxval ):
    '''simple docstring'''
    for name, mod in model.named_modules():
        if name.endswith(""".output.dense""" ) and not name.endswith("""attention.output.dense""" ):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval )
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f'CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}' )
def expand_amax ( model ):
    '''simple docstring'''
    for name, mod in model.named_modules():
        if hasattr(mod , """_weight_quantizer""" ) and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k , dtype=amax.dtype , device=amax.device ) * amax
            print(f'expanding {name} {amax} -> {mod._weight_quantizer._amax}' )
def recalibrate_weights ( model ):
    '''simple docstring'''
    for name, mod in model.named_modules():
        if hasattr(mod , """_weight_quantizer""" ):
            if not hasattr(mod._weight_quantizer , """_amax""" ):
                print(f'RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER' )
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
            reduce_axis = set(range(len(mod.weight.size() ) ) ) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight , axis=reduce_axis , keepdims=True ).detach()
            logger.info(f'RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}' )
            mod._weight_quantizer._amax = amax
def print_model_summary ( model , name_width=25 , line_width=1_80 , ignore=None ):
    '''simple docstring'''
    if ignore is None:
        ignore = []
    elif not isinstance(ignore , list ):
        ignore = [ignore]
    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod , """weight""" ):
            continue
        name_width = max(name_width , len(name ) )
    for name, mod in model.named_modules():
        input_q = getattr(mod , """_input_quantizer""" , None )
        weight_q = getattr(mod , """_weight_quantizer""" , None )
        if not hasattr(mod , """weight""" ):
            continue
        if type(mod ) in ignore:
            continue
        if [True for s in ignore if type(s ) is str and s in name]:
            continue
        act_str = f'Act:{input_q.extra_repr()}'
        wgt_str = f'Wgt:{weight_q.extra_repr()}'
        s = f'{name:{name_width}} {act_str} {wgt_str}'
        if len(s ) <= line_width:
            logger.info(s )
        else:
            logger.info(f'{name:{name_width}} {act_str}' )
            logger.info(f'{" ":{name_width}} {wgt_str}' )
def print_quant_summary ( model ):
    '''simple docstring'''
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod , pytorch_quantization.nn.TensorQuantizer ):
            print(f'{name:80} {mod}' )
            count += 1
    print(f'{count} TensorQuantizers found in model' )
def set_quantizer ( name , mod , quantizer , k , v ):
    '''simple docstring'''
    quantizer_mod = getattr(mod , quantizer , None )
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod , k )
        setattr(quantizer_mod , k , v )
    else:
        logger.warning(f'{name} has no {quantizer}' )
def set_quantizers ( name , mod , which="both" , **kwargs ):
    '''simple docstring'''
    s = f'Warning: changing {which} quantizers of {name:{qname_width}}'
    for k, v in kwargs.items():
        s += f' {k}={v}'
        if which in ["input", "both"]:
            set_quantizer(name , mod , """_input_quantizer""" , k , v )
        if which in ["weight", "both"]:
            set_quantizer(name , mod , """_weight_quantizer""" , k , v )
    logger.info(s )
def set_quantizer_by_name ( model , names , **kwargs ):
    '''simple docstring'''
    for name, mod in model.named_modules():
        if hasattr(mod , """_input_quantizer""" ) or hasattr(mod , """_weight_quantizer""" ):
            for n in names:
                if re.search(n , name ):
                    set_quantizers(name , mod , **kwargs )
        elif name.endswith("""_quantizer""" ):
            for n in names:
                if re.search(n , name ):
                    s = f'Warning: changing {name:{name_width}}'
                    for k, v in kwargs.items():
                        s += f' {k}={v}'
                        setattr(mod , k , v )
                    logger.info(s ) | 667 | 0 |
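# Hedged end-to-end sketch for the quantization helpers above (the argument values and the
# `model` object are assumptions; `model` would be a network containing TensorQuantizer modules):
#   parser = argparse.ArgumentParser()
#   add_arguments(parser)
#   args = parser.parse_args(["--wprec", "8", "--aprec", "8", "--calibrator", "max"])
#   set_default_quantizers(args)
#   configure_model(model, args)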
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf ( model , ckpt_dir , model_name ) -> Any:
    tensors_to_transpose = ("""dense.weight""", """attention.self.query""", """attention.self.key""", """attention.self.value""")
    var_map = (
        ("""layer.""", """layer_"""),
        ("""word_embeddings.weight""", """word_embeddings"""),
        ("""position_embeddings.weight""", """position_embeddings"""),
        ("""token_type_embeddings.weight""", """token_type_embeddings"""),
        (""".""", """/"""),
        ("""LayerNorm/weight""", """LayerNorm/gamma"""),
        ("""LayerNorm/bias""", """LayerNorm/beta"""),
        ("""weight""", """kernel"""),
    )
    if not os.path.isdir(ckpt_dir ):
        os.makedirs(ckpt_dir )
    state_dict = model.state_dict()
    def to_tf_var_name(name ):
        for patt, repl in iter(var_map ):
            name = name.replace(patt , repl )
        return f"""bert/{name}"""
    def create_tf_var(tensor , name , session ):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype )
        tf_var = tf.get_variable(dtype=tf_dtype , shape=tensor.shape , name=name , initializer=tf.zeros_initializer() )
        session.run(tf.variables_initializer([tf_var] ) )
        session.run(tf_var )
        return tf_var
    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name )
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose ):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor , name=tf_name , session=session )
            tf.keras.backend.set_value(tf_var , torch_tensor )
            tf_weight = session.run(tf_var )
            print(f"""Successfully created {tf_name}: {np.allclose(tf_weight , torch_tensor )}""" )
        saver = tf.train.Saver(tf.trainable_variables() )
        saver.save(session , os.path.join(ckpt_dir , model_name.replace('-' , '_' ) + '.ckpt' ) )
def main ( raw_args=None ) -> Any:
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name' , type=str , required=True , help='model name e.g. bert-base-uncased' )
    parser.add_argument(
        '--cache_dir' , type=str , default=None , required=False , help='Directory containing pytorch model' )
    parser.add_argument('--pytorch_model_path' , type=str , required=True , help='/path/to/<pytorch-model-name>.bin' )
    parser.add_argument('--tf_cache_dir' , type=str , required=True , help='Directory in which to save tensorflow model' )
    args = parser.parse_args(raw_args )
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
    convert_pytorch_checkpoint_to_tf(model=model , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
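# Hypothetical command line for the converter above (the script name and paths are placeholders):
#   python convert_bert_pytorch_checkpoint_to_original_tf.py --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin --tf_cache_dir ./tf_ckpt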
| 148 |
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : List[Any] = 0
@slow
def _a (self ):
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(lowercase ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
A_ : Tuple = AutoTokenizer.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(lowercase ) , 0 )
def _a (self ):
A_ : str = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def _a (self ):
A_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def _a (self ):
A_ : int = AutoConfig.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
# Check that tokenizer_type ≠ model_type
A_ : int = AutoTokenizer.from_pretrained(lowercase , config=lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def _a (self ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase , """vocab.txt""" ) )
A_ : Optional[Any] = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""bert""" , use_fast=lowercase )
self.assertIsInstance(lowercase , lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase , """merges.txt""" ) )
A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""gpt2""" , use_fast=lowercase )
self.assertIsInstance(lowercase , lowercase )
@require_tokenizers
def _a (self ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase , """vocab.txt""" ) )
A_ : Any = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""bert""" )
self.assertIsInstance(lowercase , lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase , """merges.txt""" ) )
A_ : int = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""gpt2""" )
self.assertIsInstance(lowercase , lowercase )
def _a (self ):
with pytest.raises(lowercase ):
AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" )
@require_tokenizers
def _a (self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
A_ : str = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
if isinstance(lowercase , lowercase ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , lowercase )
else:
self.assertEqual(tokenizer.do_lower_case , lowercase )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def _a (self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
lowercase , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ):
A_ : int = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" )
def _a (self ):
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
A_ : List[str] = TOKENIZER_MAPPING.values()
A_ : Optional[Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(lowercase )
@require_tokenizers
def _a (self ):
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=lowercase ) , lowercase )
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , lowercase )
@require_tokenizers
def _a (self ):
A_ : str = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=lowercase )
A_ : List[Any] = """Hello, world. How are you?"""
A_ : List[Any] = tokenizer.tokenize(lowercase )
self.assertEqual("""[UNK]""" , tokens[0] )
A_ : Dict = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=lowercase )
A_ : List[Any] = tokenizer.tokenize(lowercase )
self.assertEqual("""[UNK]""" , tokens[0] )
@require_tokenizers
def _a (self ):
A_ : Optional[int] = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" )
self.assertEqual(type(lowercase ) , lowercase )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 30000 )
self.assertEqual(tokenizer.unk_token , """[UNK]""" )
self.assertEqual(tokenizer.padding_side , """right""" )
self.assertEqual(tokenizer.truncation_side , """right""" )
def _a (self ):
A_ : Any = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : Tuple = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def _a (self ):
A_ : Union[str, Any] = AutoTokenizer.from_pretrained("""ctrl""" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(lowercase , lowercase )
def _a (self ):
# Check we can load the tokenizer config of an online model.
A_ : Tuple = get_tokenizer_config("""bert-base-cased""" )
A_ : Any = config.pop("""_commit_hash""" , lowercase )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(lowercase , {"""do_lower_case""": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
A_ : List[Any] = get_tokenizer_config(lowercase )
self.assertDictEqual(lowercase , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
A_ : int = AutoTokenizer.from_pretrained(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : Dict = get_tokenizer_config(lowercase )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" )
def _a (self ):
try:
AutoConfig.register("""custom""" , lowercase )
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase ):
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
A_ : Tuple = CustomTokenizer.from_pretrained(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : List[str] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def _a (self ):
try:
AutoConfig.register("""custom""" , lowercase )
# Can register in two steps
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
lowercase , slow_tokenizer_class=lowercase , fast_tokenizer_class=lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase ):
AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
A_ : str = BertTokenizerFast.from_pretrained(lowercase )
bert_tokenizer.save_pretrained(lowercase )
A_ : Optional[Any] = CustomTokenizerFast.from_pretrained(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : List[str] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase , use_fast=lowercase )
self.assertIsInstance(lowercase , lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _a (self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase ):
A_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase ):
A_ : Any = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
A_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : int = AutoTokenizer.from_pretrained(lowercase , trust_remote_code=lowercase )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
A_ : str = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : Any = AutoTokenizer.from_pretrained(lowercase , trust_remote_code=lowercase , use_fast=lowercase )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
@require_tokenizers
def _a (self ):
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Dict = False
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : str = NewTokenizer
__SCREAMING_SNAKE_CASE : Optional[Any] = False
try:
AutoConfig.register("""custom""" , lowercase )
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
# If remote code is not set, the default is to use local
A_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
A_ : int = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
A_ : int = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
A_ : List[Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
A_ : Any = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertTrue(tokenizer.special_attribute_present )
A_ : Union[str, Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _a (self ):
A_ : Dict = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
A_ : Optional[int] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def _a (self ):
with self.assertRaisesRegex(
lowercase , """bert-base is not a local folder and is not a valid model identifier""" ):
A_ : List[str] = AutoTokenizer.from_pretrained("""bert-base""" )
def _a (self ):
with self.assertRaisesRegex(
lowercase , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
A_ : Tuple = AutoTokenizer.from_pretrained(lowercase , revision="""aaaaaa""" )
def _a (self ):
# Make sure we have cached the tokenizer.
A_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
A_ : Tuple = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 ) | 667 | 0 |
import os
def solution ( ):
    with open(os.path.dirname(__file__ ) + "/p022_names.txt" ) as file:
        names = str(file.readlines()[0] )
        names = names.replace("\"" , "" ).split("," )
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names ):
        for letter in name:
            name_score += ord(letter ) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
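# Worked example from the Project Euler problem statement: COLIN is worth
# 3 + 15 + 12 + 9 + 14 = 53, and as the 938th name alphabetically it scores 938 * 53 = 49714.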
| 204 |
'''simple docstring'''
from __future__ import annotations
def a ( number_of_bytes , partitions ):
    '''simple docstring'''
    if partitions <= 0:
        raise ValueError("""partitions must be a positive number!""" )
    if partitions > number_of_bytes:
        raise ValueError("""partitions can not > number_of_bytes!""" )
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions ):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f'{start_bytes}-{end_bytes}' )
    return allocation_list
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod() | 667 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class _SCREAMING_SNAKE_CASE( unittest.TestCase ):
    def setUp ( self ) -> Any:
        """simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab_tokens = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest"""]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        image_processor_map = {
            """do_resize""": True,
            """size""": {"""height""": 18, """width""": 18},
            """do_normalize""": True,
            """image_mean""": [0.5, 0.5, 0.5],
            """image_std""": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname ,IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file ,'''w''' ,encoding='''utf-8''' ) as fp:
            json.dump(image_processor_map ,fp )
    def get_tokenizer ( self ,**kwargs ) -> Any:
        """simple docstring"""
        return BertTokenizer.from_pretrained(self.tmpdirname ,**kwargs )
    def get_image_processor ( self ,**kwargs ) -> Optional[int]:
        """simple docstring"""
        return ViTImageProcessor.from_pretrained(self.tmpdirname ,**kwargs )
    def tearDown ( self ) -> Dict:
        """simple docstring"""
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs ( self ) -> Dict:
        """simple docstring"""
        image_inputs = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x ,0 ,-1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default ( self ) -> Union[str, Any]:
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer ,image_processor=image_processor )
        processor.save_pretrained(self.tmpdirname )
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer ,(BertTokenizer, BertTokenizerFast) )
        self.assertEqual(processor.image_processor.to_json_string() ,image_processor.to_json_string() )
        self.assertIsInstance(processor.image_processor ,ViTImageProcessor )
    def test_save_load_pretrained_additional_features ( self ) -> List[Any]:
        """simple docstring"""
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='''(BOS)''' ,eos_token='''(EOS)''' )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False ,padding_value=1.0 )
        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname ,bos_token='''(BOS)''' ,eos_token='''(EOS)''' ,do_normalize=False ,padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer ,(BertTokenizer, BertTokenizerFast) )
        self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor ,ViTImageProcessor )
    def test_image_processor ( self ) -> Optional[Any]:
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer ,image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input ,return_tensors='''np''' )
        input_processor = processor(images=image_input ,return_tensors='''np''' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
    def test_tokenizer ( self ) -> Tuple:
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer ,image_processor=image_processor )
        input_str = """lower newer"""
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
    def test_processor ( self ) -> Dict:
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer ,image_processor=image_processor )
        input_str = """lower newer"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str ,images=image_input )
        self.assertListEqual(list(inputs.keys() ) ,['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
        # test if it raises when no input is passed
        with self.assertRaises(ValueError ):
            processor()
    def test_tokenizer_decode ( self ) -> List[str]:
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer ,image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok ,decoded_processor )
    def test_model_input_names ( self ) -> List[Any]:
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer ,image_processor=image_processor )
        input_str = """lower newer"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str ,images=image_input )
        self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names ) | 498 |
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys ( encoder_config , decoder_config ):
    '''simple docstring'''
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm1.weight', f'encoder.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm1.bias', f'encoder.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.weight', f'encoder.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.bias', f'encoder.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm2.weight', f'encoder.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm2.bias', f'encoder.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.weight', f'encoder.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.bias', f'encoder.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc2.weight', f'encoder.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.mlp.fc2.bias', f'encoder.encoder.layer.{i}.output.dense.bias') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def read_in_q_k_v ( state_dict , encoder_config ):
    '''simple docstring'''
    for i in range(encoder_config.num_hidden_layers ):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f'encoder.deit.blocks.{i}.attn.qkv.weight' )
        state_dict[f'encoder.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f'encoder.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f'encoder.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key ( dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def prepare_img ( checkpoint_url ):
    '''simple docstring'''
    if "handwritten" in checkpoint_url:
        url = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg""" # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw ).convert("""RGB""" )
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint ( checkpoint_url , pytorch_dump_folder_path ):
    '''simple docstring'''
    encoder_config = ViTConfig(image_size=3_84 , qkv_bias=False )
    decoder_config = TrOCRConfig()
    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 7_68
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 10_24
        encoder_config.intermediate_size = 40_96
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 10_24
    else:
        raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" )
    # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = """relu"""
        decoder_config.max_position_embeddings = 10_24
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False
    # load HuggingFace model
    encoder = ViTModel(encoder_config , add_pooling_layer=False )
    decoder = TrOCRForCausalLM(decoder_config )
    model = VisionEncoderDecoderModel(encoder=encoder , decoder=decoder )
    model.eval()
    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" , check_hash=True )["""model"""]
    rename_keys = create_rename_keys(encoder_config , decoder_config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , encoder_config )
    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]
    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith("""decoder""" ) and "output_projection" not in key:
            state_dict["""decoder.model.""" + key] = val
        else:
            state_dict[key] = val
    # load state dict
    model.load_state_dict(state_dict )
    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size )
    tokenizer = RobertaTokenizer.from_pretrained("""roberta-large""" )
    processor = TrOCRProcessor(image_processor , tokenizer )
    pixel_values = processor(images=prepare_img(checkpoint_url ) , return_tensors="""pt""" ).pixel_values
    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
    outputs = model(pixel_values=pixel_values , decoder_input_ids=decoder_input_ids )
    logits = outputs.logits
    expected_shape = torch.Size([1, 1, 5_02_65] )
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4_502, -4.6_683, -0.5_347, -2.9_291, 9.1_435, -3.0_571, 8.9_764, 1.7_560, 8.7_358, -1.5_311] )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6_437, -1.3_129, -2.2_596, -5.3_455, 6.3_539, 1.7_604, 5.4_991, 1.4_702, 5.6_113, 2.0_170] )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6_816, -5.8_388, 1.1_398, -6.9_034, 6.8_505, -2.4_393, 1.2_284, -1.0_232, -1.9_661, -3.9_210] )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0_162, -7.0_959, 4.4_155, -5.1_063, 7.0_468, -3.1_631, 2.6_466, -0.3_081, -0.8_106, -1.7_535] )
    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10] , expected_slice , atol=1E-3 ), "First elements of logits not as expected"
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'Saving model to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'Saving processor to {pytorch_dump_folder_path}' )
    processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 667 | 0 |
from collections import defaultdict
def check_anagrams ( first_str , second_str ):
    '''simple docstring'''
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()
    # Remove whitespace
    first_str = first_str.replace(" " , "" )
    second_str = second_str.replace(" " , "" )
    # Strings of different lengths are not anagrams
    if len(first_str ) != len(second_str ):
        return False
    # Default values for count should be 0
    count = defaultdict(int )
    # For each character in input strings,
    # increment count in the corresponding
    for i in range(len(first_str ) ):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1
    return all(_count == 0 for _count in count.values() )
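# Quick examples, consistent with the checks above (lowercasing and whitespace removal):
#   check_anagrams("Silent", "Listen")  ->  True
#   check_anagrams("This is a string", "Is this a string")  ->  True
#   check_anagrams("There", "Their")  ->  False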
if __name__ == "__main__":
from doctest import testmod
testmod()
    input_a = input('Enter the first string ').strip()
    input_b = input('Enter the second string ').strip()
    status = check_anagrams(input_a, input_b)
print(f"""{input_a} and {input_b} are {'' if status else 'not '}anagrams.""")
| 625 |
'''simple docstring'''
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))''')) | 667 | 0 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
rename_keys_prefix = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
ACCEPTABLE_CHECKPOINTS = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def load_state_dict (checkpoint_path):
    sd = torch.load(checkpoint_path , map_location='cpu')
    return sd
def get_new_dict (d , config , rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["""visual_bert.embeddings.position_ids"""] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0] , name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["""cls.predictions.decoder.bias"""] = new_d["""cls.predictions.bias"""]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint (checkpoint_path , pytorch_dump_folder_path):
    assert (
        checkpoint_path.split('/')[-1] in ACCEPTABLE_CHECKPOINTS
    ), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
    # Get Config
    if "pre" in checkpoint_path:
        model_type = """pretraining"""
        if "vcr" in checkpoint_path:
            config_params = {"""visual_embedding_dim""": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"""visual_embedding_dim""": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"""visual_embedding_dim""": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"""visual_embedding_dim""": 1024}
        else:
            raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''')
    else:
        if "vcr" in checkpoint_path:
            config_params = {"""visual_embedding_dim""": 512}
            model_type = """multichoice"""
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"""visual_embedding_dim""": 2048}
            model_type = """vqa_advanced"""
        elif "vqa" in checkpoint_path:
            config_params = {"""visual_embedding_dim""": 2048, """num_labels""": 3129}
            model_type = """vqa"""
        elif "nlvr" in checkpoint_path:
            config_params = {
                """visual_embedding_dim""": 1024,
                """num_labels""": 2,
            }
            model_type = """nlvr"""
    config = VisualBertConfig(**config_params)
    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict , config)
    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)
    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 73 |
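The conversion above reduces to rewriting state-dict keys by prefix substitution before loading the tensors into the new architecture. A minimal standalone sketch of that pattern (the key pair below is illustrative, not VisualBERT's actual mapping):

from collections import OrderedDict

def rename_state_dict(old_sd, rename_pairs):
    # Copy every entry under its rewritten key; unmatched keys pass through unchanged.
    new_sd = OrderedDict()
    for key, value in old_sd.items():
        new_key = key
        for old, new in rename_pairs:
            new_key = new_key.replace(old, new)
        new_sd[new_key] = value
    return new_sd

sd = OrderedDict([("bert.embeddings_visual.weight", 0)])
print(list(rename_state_dict(sd, [("embeddings_visual", "visual_embeddings")])))
# ['bert.visual_embeddings.weight']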
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uinta)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path="digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path="digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any() | 667 | 0 |
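For context on what `local_binary_value` computes: an LBP code thresholds the 8 neighbours of a pixel against the centre value and packs the comparisons into one byte. A self-contained sketch, assuming the common clockwise neighbour ordering (the module under test may order bits differently):

import numpy as np

def lbp_value(image, x, y):
    # Compare the 8 neighbours against the centre pixel, clockwise from
    # the top-left corner, and OR the comparison bits into one byte.
    center = image[x][y]
    offsets = [(-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1)]
    value = 0
    for bit, (dx, dy) in enumerate(offsets):
        nx, ny = x + dx, y + dy
        if 0 <= nx < image.shape[0] and 0 <= ny < image.shape[1]:
            value |= int(image[nx][ny] >= center) << bit
    return value

img = np.array([[10, 20, 30], [40, 50, 60], [70, 80, 90]])
print(lbp_value(img, 1, 1))  # -> 120: bits set only for the neighbours >= 50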
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''feature_extraction_vit'''] = ['''ViTFeatureExtractor''']
_import_structure['''image_processing_vit'''] = ['''ViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_vit'''] = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_tf_vit'''] = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_flax_vit'''] = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 328 |
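The `_LazyModule` indirection above defers the heavy torch/TF/Flax imports until an attribute is first touched. Outside transformers, the same effect is available with a plain PEP 562 module-level `__getattr__`; a minimal sketch for a hypothetical package `__init__.py` (the submodule and class names are placeholders):

# lazy_pkg/__init__.py -- resolve attributes from submodules on first access.
import importlib

_import_structure = {"config": ["Config"], "model": ["Model"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    module_name = _attr_to_module.get(name)
    if module_name is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    module = importlib.import_module(f".{module_name}", __name__)
    return getattr(module, name)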
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
lowerCamelCase :Dict = get_logger(__name__)
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase=None ):
A_ : Optional[int] = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith("""__""" ):
setattr(self , lowercase , getattr(lowercase , lowercase ) )
A_ : List[Any] = module._original_module if isinstance(lowercase , _PatchedModuleObj ) else module
class _lowerCAmelCase :
__SCREAMING_SNAKE_CASE : Dict = []
def __init__(self , lowercase , lowercase , lowercase , lowercase=None ):
A_ : Union[str, Any] = obj
A_ : Optional[int] = target
A_ : Optional[Any] = new
A_ : Optional[Any] = target.split(""".""" )[0]
A_ : Tuple = {}
A_ : Optional[int] = attrs or []
def __enter__(self ):
*A_, A_ : Optional[Any] = self.target.split(""".""" )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(lowercase ) ):
try:
A_ : Any = import_module(""".""".join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
A_ : int = getattr(self.obj , lowercase )
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(lowercase , _PatchedModuleObj ) and obj_attr._original_module is submodule)
):
A_ : str = obj_attr
# patch at top level
setattr(self.obj , lowercase , _PatchedModuleObj(lowercase , attrs=self.attrs ) )
A_ : Optional[Any] = getattr(self.obj , lowercase )
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(lowercase , lowercase , _PatchedModuleObj(getattr(lowercase , lowercase , lowercase ) , attrs=self.attrs ) )
A_ : Dict = getattr(lowercase , lowercase )
# finally set the target attribute
setattr(lowercase , lowercase , self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
A_ : Optional[Any] = getattr(import_module(""".""".join(lowercase ) ) , lowercase )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , lowercase ) is attr_value:
A_ : Dict = getattr(self.obj , lowercase )
setattr(self.obj , lowercase , self.new )
elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
A_ : int = globals()["""__builtins__"""][target_attr]
setattr(self.obj , lowercase , self.new )
else:
raise RuntimeError(F'Tried to patch attribute {target_attr} instead of a submodule.' )
def __exit__(self , *lowercase ):
for attr in list(self.original ):
setattr(self.obj , lowercase , self.original.pop(lowercase ) )
def _a (self ):
self.__enter__()
self._active_patches.append(self )
def _a (self ):
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__() | 667 | 0 |
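A usage sketch of the idea implemented above. Because the class names are mangled in this dump, the standard-library `unittest.mock.patch` is shown instead; it covers the common case of swapping out `os.path.join` for the duration of a block:

import os
from unittest.mock import patch

def fake_join(*parts):
    return "/".join(parts)  # deterministic separator for the test

with patch("os.path.join", fake_join):
    assert os.path.join("a", "b") == "a/b"
# Outside the context manager the original function is restored.
assert os.path.join("a", "b") == os.sep.join(("a", "b"))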
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
a = logging.get_logger(__name__)
class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use BeitImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 7 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_wav2vec2'''] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_tf_wav2vec2'''] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_flax_wav2vec2'''] = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 667 | 0 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
| 227 |
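A concrete subcommand built on a base like this registers its own argparse subparser and implements `run`. A self-contained sketch with made-up names (the `EchoCommand` and its flag are purely illustrative, mirroring the abstract class above):

from abc import ABC, abstractmethod
from argparse import ArgumentParser

class BaseCommand(ABC):
    # Mirrors the abstract base above: one hook to register CLI args, one to run.
    @staticmethod
    @abstractmethod
    def register_subcommand(subparsers):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()

class EchoCommand(BaseCommand):
    @staticmethod
    def register_subcommand(subparsers):
        sub = subparsers.add_parser("echo")
        sub.add_argument("text")
        sub.set_defaults(factory=lambda args: EchoCommand(args.text))

    def __init__(self, text):
        self.text = text

    def run(self):
        print(self.text)

parser = ArgumentParser("cli")
EchoCommand.register_subcommand(parser.add_subparsers())
args = parser.parse_args(["echo", "hello"])
args.factory(args).run()  # prints "hello"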
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , lowercase , lowercase , lowercase , lowercase = None , ):
super().__init__()
self.register_modules(transformer=lowercase , vae=lowercase , scheduler=lowercase )
# create a imagenet -> id dictionary for easier use
A_ : str = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(""",""" ):
A_ : Optional[Any] = int(lowercase )
A_ : List[Any] = dict(sorted(self.labels.items() ) )
def _a (self , lowercase ):
if not isinstance(lowercase , lowercase ):
A_ : Optional[int] = list(lowercase )
for l in label:
if l not in self.labels:
raise ValueError(
F'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.' )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__(self , lowercase , lowercase = 4.0 , lowercase = None , lowercase = 50 , lowercase = "pil" , lowercase = True , ):
A_ : Tuple = len(lowercase )
A_ : Optional[Any] = self.transformer.config.sample_size
A_ : int = self.transformer.config.in_channels
A_ : Optional[int] = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowercase , device=self.device , dtype=self.transformer.dtype , )
A_ : Optional[Any] = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
A_ : Optional[int] = torch.tensor(lowercase , device=self.device ).reshape(-1 )
A_ : Optional[int] = torch.tensor([1000] * batch_size , device=self.device )
A_ : Optional[Any] = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(lowercase )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
A_ : List[Any] = latent_model_input[: len(lowercase ) // 2]
A_ : List[str] = torch.cat([half, half] , dim=0 )
A_ : Any = self.scheduler.scale_model_input(lowercase , lowercase )
A_ : Tuple = t
if not torch.is_tensor(lowercase ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
A_ : Optional[Any] = latent_model_input.device.type == """mps"""
if isinstance(lowercase , lowercase ):
A_ : Optional[Any] = torch.floataa if is_mps else torch.floataa
else:
A_ : List[Any] = torch.intaa if is_mps else torch.intaa
A_ : List[Any] = torch.tensor([timesteps] , dtype=lowercase , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
A_ : List[Any] = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
A_ : int = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
A_ : List[Any] = self.transformer(
lowercase , timestep=lowercase , class_labels=lowercase ).sample
# perform guidance
if guidance_scale > 1:
A_, A_ : Any = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
A_, A_ : List[Any] = torch.split(lowercase , len(lowercase ) // 2 , dim=0 )
A_ : Optional[Any] = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
A_ : str = torch.cat([half_eps, half_eps] , dim=0 )
A_ : Optional[int] = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
A_, A_ : int = torch.split(lowercase , lowercase , dim=1 )
else:
A_ : Optional[int] = noise_pred
# compute previous image: x_t -> x_t-1
A_ : Union[str, Any] = self.scheduler.step(lowercase , lowercase , lowercase ).prev_sample
if guidance_scale > 1:
A_, A_ : int = latent_model_input.chunk(2 , dim=0 )
else:
A_ : Union[str, Any] = latent_model_input
A_ : Union[str, Any] = 1 / self.vae.config.scaling_factor * latents
A_ : List[Any] = self.vae.decode(lowercase ).sample
A_ : List[str] = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A_ : Union[str, Any] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
A_ : int = self.numpy_to_pil(lowercase )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=lowercase ) | 667 | 0 |
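The guidance step inside the denoising loop above is ordinary classifier-free guidance. In isolation it is a three-line tensor operation; a sketch with illustrative shapes (the batch stacks conditional predictions first, matching the pipeline's label ordering):

import torch

def classifier_free_guidance(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # The batch holds [conditional, unconditional] halves stacked along dim 0.
    cond_eps, uncond_eps = noise_pred.chunk(2, dim=0)
    return uncond_eps + guidance_scale * (cond_eps - uncond_eps)

eps = torch.randn(4, 3, 8, 8)  # 2 conditional + 2 unconditional predictions
guided = classifier_free_guidance(eps, guidance_scale=4.0)
print(guided.shape)  # torch.Size([2, 3, 8, 8])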
'''simple docstring'''
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path,
    question_encoder_name_or_path,
    dest_dir,
    config_name_or_path=None,
    generator_tokenizer_name_or_path=None,
    question_encoder_tokenizer_name_or_path=None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / '''generator_tokenizer/''' )
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / '''question_encoder_tokenizer/''' )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_type',
choices=['rag_sequence', 'rag_token'],
required=True,
type=str,
help='RAG model type: rag_sequence, rag_token',
)
parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.')
parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier')
parser.add_argument(
'--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier'
)
parser.add_argument(
'--generator_tokenizer_name_or_path',
type=str,
help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``',
)
parser.add_argument(
'--question_encoder_tokenizer_name_or_path',
type=str,
help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``',
)
parser.add_argument(
'--config_name_or_path',
type=str,
help=(
'Identifier of the model config to use, if not provided, resolves to a base config for a given'
' ``model_type``'
),
)
args = parser.parse_args()
dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 640 |
'''simple docstring'''
import math
lowerCamelCase :int = 1_0
lowerCamelCase :List[Any] = 7
lowerCamelCase :Union[str, Any] = BALLS_PER_COLOUR * NUM_COLOURS
def a ( lowerCamelCase__ = 20 ):
'''simple docstring'''
A_ : Dict = math.comb(lowerCamelCase__ , lowerCamelCase__ )
A_ : Optional[Any] = math.comb(NUM_BALLS - BALLS_PER_COLOUR , lowerCamelCase__ )
A_ : List[str] = NUM_COLOURS * (1 - missing_colour / total)
return f'{result:.9f}'
if __name__ == "__main__":
print(solution(2_0)) | 667 | 0 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table):
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table) | 113 |
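Each pickle the builder reads is round-tripped through pandas into an Arrow table. That conversion on its own is a one-liner; a quick standalone sketch:

import pandas as pd
import pyarrow as pa

df = pd.DataFrame({"text": ["a", "b"], "label": [0, 1]})
table = pa.Table.from_pandas(df)
print(table.schema)  # text: string, label: int64 (plus a pandas metadata block)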
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase :List[Any] = logging.get_logger(__name__)
lowerCamelCase :Union[str, Any] = {
'''google/pix2struct-textcaps-base''': (
'''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'''
),
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'pix2struct_text_model'
__SCREAMING_SNAKE_CASE : Optional[int] = ['past_key_values']
__SCREAMING_SNAKE_CASE : List[Any] = {
'hidden_size': 'hidden_size',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__(self , lowercase=50244 , lowercase=768 , lowercase=64 , lowercase=2048 , lowercase=12 , lowercase=12 , lowercase=32 , lowercase=128 , lowercase=0.1 , lowercase=1E-6 , lowercase=1.0 , lowercase="gelu_new" , lowercase=0 , lowercase=False , lowercase=0 , lowercase=1 , lowercase=False , lowercase=True , **lowercase , ):
A_ : Tuple = vocab_size
A_ : str = hidden_size
A_ : Optional[Any] = d_kv
A_ : Tuple = d_ff
A_ : str = num_layers
A_ : int = num_heads
A_ : Dict = relative_attention_num_buckets
A_ : Optional[Any] = relative_attention_max_distance
A_ : Dict = dropout_rate
A_ : Optional[int] = layer_norm_epsilon
A_ : Dict = initializer_factor
A_ : Any = use_cache
A_ : int = eos_token_id
A_ : Tuple = decoder_start_token_id
# for backwards compatibility
A_ : str = dense_act_fn
super().__init__(
pad_token_id=lowercase , eos_token_id=lowercase , decoder_start_token_id=lowercase , tie_word_embeddings=lowercase , is_decoder=lowercase , **lowercase , )
@classmethod
def _a (cls , lowercase , **lowercase ):
cls._set_token_in_kwargs(lowercase )
A_, A_ : List[str] = cls.get_config_dict(lowercase , **lowercase )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
A_ : int = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(lowercase , **lowercase )
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : int = 'pix2struct_vision_model'
def __init__(self , lowercase=768 , lowercase=768 , lowercase=2048 , lowercase=64 , lowercase=12 , lowercase=12 , lowercase="gelu_new" , lowercase=1E-6 , lowercase=0.0 , lowercase=0.0 , lowercase=1E-10 , lowercase=1.0 , lowercase=4096 , lowercase=32 , lowercase=128 , **lowercase , ):
super().__init__(**lowercase )
A_ : List[str] = hidden_size
A_ : Optional[int] = patch_embed_hidden_size
A_ : Any = d_ff
A_ : str = dropout_rate
A_ : Dict = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : List[Any] = initializer_range
A_ : List[str] = initializer_factor
A_ : Dict = attention_dropout
A_ : Optional[Any] = layer_norm_eps
A_ : Optional[Any] = dense_act_fn
A_ : List[Any] = seq_len
A_ : Tuple = relative_attention_num_buckets
A_ : Any = relative_attention_max_distance
A_ : int = d_kv
@classmethod
def _a (cls , lowercase , **lowercase ):
cls._set_token_in_kwargs(lowercase )
A_, A_ : List[Any] = cls.get_config_dict(lowercase , **lowercase )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
A_ : Tuple = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(lowercase , **lowercase )
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Any = 'pix2struct'
__SCREAMING_SNAKE_CASE : List[Any] = True
def __init__(self , lowercase=None , lowercase=None , lowercase=1.0 , lowercase=0.02 , lowercase=False , lowercase=False , lowercase=True , **lowercase , ):
super().__init__(tie_word_embeddings=lowercase , is_encoder_decoder=lowercase , **lowercase )
if text_config is None:
A_ : Optional[Any] = {}
logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""" )
if vision_config is None:
A_ : Tuple = {}
logger.info("""vision_config is None. Initializing the Pix2StructVisionConfig with default values.""" )
A_ : Tuple = PixaStructTextConfig(**lowercase )
A_ : List[str] = PixaStructVisionConfig(**lowercase )
A_ : Dict = self.text_config.decoder_start_token_id
A_ : Union[str, Any] = self.text_config.pad_token_id
A_ : str = self.text_config.eos_token_id
A_ : List[str] = initializer_factor
A_ : int = initializer_range
A_ : Tuple = self.initializer_range
A_ : Tuple = self.initializer_range
A_ : List[str] = is_vqa
@classmethod
def _a (cls , lowercase , lowercase , **lowercase ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase )
def _a (self ):
A_ : Optional[Any] = copy.deepcopy(self.__dict__ )
A_ : str = self.text_config.to_dict()
A_ : List[Any] = self.vision_config.to_dict()
A_ : List[str] = self.__class__.model_type
return output | 667 | 0 |
"""simple docstring"""
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # push x onto the stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            # evaluate the 2 values popped from the stack & push the result
            stack.append(str(opr[x](int(a), int(b))))
            # output in tabular format
            print(x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ")

    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
| 231 |
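A worked trace of the evaluator on `5 6 9 * +` (infix `5 + 6 * 9`): `5`, `6`, `9` are pushed; `*` pops `9` then `6` and pushes `54`; `+` pops `54` then `5` and pushes `59`, which is returned. Assuming the evaluator is bound to `solve` as in the `__main__` block:

assert solve("5 6 9 * +".split(" ")) == 59  # 5 + (6 * 9)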
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
'''configuration_audio_spectrogram_transformer''': [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ASTConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_audio_spectrogram_transformer'''] = [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ASTForAudioClassification''',
'''ASTModel''',
'''ASTPreTrainedModel''',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''feature_extraction_audio_spectrogram_transformer'''] = ['''ASTFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 667 | 0 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 148 |
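To see why the expected tokenization holds: with merges `a p`, `ap t</w>`, `r e`, `a d`, `ad apt</w>` applied in priority order, the word `adapt` collapses to a single token, while `react` only reaches `re` plus single characters, giving `re@@ a@@ c@@ t`. A tiny sketch of one merge pass (not CTRL's real implementation, just the mechanism):

def merge_once(symbols, pair):
    # Replace every adjacent occurrence of `pair` with its concatenation.
    out, i = [], 0
    while i < len(symbols):
        if i + 1 < len(symbols) and (symbols[i], symbols[i + 1]) == pair:
            out.append(symbols[i] + symbols[i + 1])
            i += 2
        else:
            out.append(symbols[i])
            i += 1
    return out

print(merge_once(["a", "p", "t</w>"], ("a", "p")))   # ['ap', 't</w>']
print(merge_once(["ap", "t</w>"], ("ap", "t</w>")))  # ['apt</w>']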
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : List[str] = ['image_processor', 'tokenizer']
__SCREAMING_SNAKE_CASE : Any = 'LayoutLMv3ImageProcessor'
__SCREAMING_SNAKE_CASE : Any = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__(self , lowercase=None , lowercase=None , **lowercase ):
A_ : Dict = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , lowercase , )
A_ : List[str] = kwargs.pop("""feature_extractor""" )
A_ : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(lowercase , lowercase )
def __call__(self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = True , lowercase = False , lowercase = None , lowercase = None , lowercase = 0 , lowercase = None , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = True , lowercase = None , **lowercase , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"""You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.""" )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"""You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" )
# first, apply the image processor
A_ : Optional[int] = self.image_processor(images=lowercase , return_tensors=lowercase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(lowercase , lowercase ):
A_ : Union[str, Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
A_ : Dict = features["""words"""]
A_ : Optional[int] = self.tokenizer(
text=text if text is not None else features["""words"""] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["""boxes"""] , word_labels=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_token_type_ids=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , )
# add pixel values
A_ : List[Any] = features.pop("""pixel_values""" )
if return_overflowing_tokens is True:
A_ : List[str] = self.get_overflowing_images(lowercase , encoded_inputs["""overflow_to_sample_mapping"""] )
A_ : Optional[int] = images
return encoded_inputs
def _a (self , lowercase , lowercase ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
A_ : str = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(lowercase ) != len(lowercase ):
raise ValueError(
"""Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"""
F' {len(lowercase )} and {len(lowercase )}' )
return images_with_overflow
def _a (self , *lowercase , **lowercase ):
return self.tokenizer.batch_decode(*lowercase , **lowercase )
def _a (self , *lowercase , **lowercase ):
return self.tokenizer.decode(*lowercase , **lowercase )
@property
def _a (self ):
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def _a (self ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowercase , )
return self.image_processor_class
@property
def _a (self ):
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , lowercase , )
return self.image_processor | 667 | 0 |
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.04_4715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.04_4715, x.dtype)
    coeff2 = tf.cast(0.79_7884_5608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("""2.4"""):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new


ACT2FN = {
    '''gelu''': gelu,
    '''gelu_10''': gelu_10,
    '''gelu_fast''': gelu_fast,
    '''gelu_new''': gelu_new,
    '''glu''': glu,
    '''mish''': mish,
    '''quick_gelu''': quick_gelu,
    '''relu''': tf.keras.activations.relu,
    '''sigmoid''': tf.keras.activations.sigmoid,
    '''silu''': tf.keras.activations.swish,
    '''swish''': tf.keras.activations.swish,
    '''tanh''': tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"""function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys() )}""")
| 204 |
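A quick numerical check of how close the tanh approximation is to the exact erf-based GELU, assuming the function names used in the rewrite above; on [-3, 3] the gap should be no more than a few 1e-4:

import tensorflow as tf

x = tf.linspace(-3.0, 3.0, 61)
exact = _gelu(x)        # erf-based definition
approx = _gelu_new(x)   # tanh approximation
print(tf.reduce_max(tf.abs(exact - approx)).numpy())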
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
lowerCamelCase :Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(__UpperCAmelCase )
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , **lowercase ):
super().__init__(**lowercase )
requires_backends(self , """vision""" )
requires_backends(self , """torch""" )
if self.framework != "pt":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
self.check_model_type(lowercase )
def _a (self , **lowercase ):
A_ : str = {}
A_ : Dict = {}
A_ : str = {}
# preprocess args
if "points_per_batch" in kwargs:
A_ : Dict = kwargs["""points_per_batch"""]
if "points_per_crop" in kwargs:
A_ : int = kwargs["""points_per_crop"""]
if "crops_n_layers" in kwargs:
A_ : str = kwargs["""crops_n_layers"""]
if "crop_overlap_ratio" in kwargs:
A_ : int = kwargs["""crop_overlap_ratio"""]
if "crop_n_points_downscale_factor" in kwargs:
A_ : Tuple = kwargs["""crop_n_points_downscale_factor"""]
# postprocess args
if "pred_iou_thresh" in kwargs:
A_ : Any = kwargs["""pred_iou_thresh"""]
if "stability_score_offset" in kwargs:
A_ : Optional[int] = kwargs["""stability_score_offset"""]
if "mask_threshold" in kwargs:
A_ : Union[str, Any] = kwargs["""mask_threshold"""]
if "stability_score_thresh" in kwargs:
A_ : List[str] = kwargs["""stability_score_thresh"""]
if "crops_nms_thresh" in kwargs:
A_ : Union[str, Any] = kwargs["""crops_nms_thresh"""]
if "output_rle_mask" in kwargs:
A_ : List[Any] = kwargs["""output_rle_mask"""]
if "output_bboxes_mask" in kwargs:
A_ : Union[str, Any] = kwargs["""output_bboxes_mask"""]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__(self , lowercase , *lowercase , lowercase=None , lowercase=None , **lowercase ):
return super().__call__(lowercase , *lowercase , num_workers=lowercase , batch_size=lowercase , **lowercase )
def _a (self , lowercase , lowercase=64 , lowercase = 0 , lowercase = 512 / 1500 , lowercase = 32 , lowercase = 1 , ):
A_ : Tuple = load_image(lowercase )
A_ : int = self.image_processor.size["""longest_edge"""]
A_, A_, A_, A_ : str = self.image_processor.generate_crop_boxes(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
A_ : Dict = self.image_processor(images=lowercase , return_tensors="""pt""" )
with self.device_placement():
if self.framework == "pt":
A_ : Optional[Any] = self.get_inference_context()
with inference_context():
A_ : str = self._ensure_tensor_on_device(lowercase , device=self.device )
A_ : Tuple = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
A_ : Tuple = image_embeddings
A_ : Dict = grid_points.shape[1]
A_ : Optional[Any] = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"""Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
"""To return all points at once, set points_per_batch to None""" )
for i in range(0 , lowercase , lowercase ):
A_ : Tuple = grid_points[:, i : i + points_per_batch, :, :]
A_ : List[Any] = input_labels[:, i : i + points_per_batch]
A_ : Optional[Any] = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def _a (self , lowercase , lowercase=0.88 , lowercase=0.95 , lowercase=0 , lowercase=1 , ):
A_ : Any = model_inputs.pop("""input_boxes""" )
A_ : str = model_inputs.pop("""is_last""" )
A_ : int = model_inputs.pop("""original_sizes""" ).tolist()
A_ : int = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
A_ : List[str] = self.model(**lowercase )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
A_ : Optional[int] = model_outputs["""pred_masks"""]
A_ : Tuple = self.image_processor.post_process_masks(
lowercase , lowercase , lowercase , lowercase , binarize=lowercase )
A_ : Union[str, Any] = model_outputs["""iou_scores"""]
A_, A_, A_ : Tuple = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , lowercase , lowercase , lowercase , lowercase , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def _a (self , lowercase , lowercase=False , lowercase=False , lowercase=0.7 , ):
A_ : Tuple = []
A_ : Optional[Any] = []
A_ : str = []
for model_output in model_outputs:
all_scores.append(model_output.pop("""iou_scores""" ) )
all_masks.extend(model_output.pop("""masks""" ) )
all_boxes.append(model_output.pop("""boxes""" ) )
A_ : Any = torch.cat(lowercase )
A_ : List[Any] = torch.cat(lowercase )
A_, A_, A_, A_ : Optional[int] = self.image_processor.post_process_for_mask_generation(
lowercase , lowercase , lowercase , lowercase )
A_ : int = defaultdict(lowercase )
for output in model_outputs:
for k, v in output.items():
extra[k].append(lowercase )
A_ : Optional[int] = {}
if output_rle_mask:
A_ : List[str] = rle_mask
if output_bboxes_mask:
A_ : Optional[int] = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra} | 667 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self) -> Any:
        x = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00])
        torch_builtin = get_activation('''gelu''')
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self) -> Dict:
        x = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00])
        torch_builtin = get_activation('''gelu''')
        gelu10 = get_activation('''gelu_10''')

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 1_0.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 1_0.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self) -> Union[str, Any]:
        get_activation('''gelu''')
        get_activation('''gelu_10''')
        get_activation('''gelu_fast''')
        get_activation('''gelu_new''')
        get_activation('''gelu_python''')
        get_activation('''gelu_pytorch_tanh''')
        get_activation('''linear''')
        get_activation('''mish''')
        get_activation('''quick_gelu''')
        get_activation('''relu''')
        get_activation('''sigmoid''')
        get_activation('''silu''')
        get_activation('''swish''')
        get_activation('''tanh''')
        with self.assertRaises(KeyError):
            get_activation('''bogus''')
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self) -> Dict:
        act1 = get_activation('''gelu''')
        act1.a = 1
        act2 = get_activation('''gelu''')
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a | 498 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def heun(ode_func, y0, x0, step_size, x_end):
    '''Solve an ODE y' = f(x, y) with Heun's (modified Euler) method.'''
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # Predictor: one explicit Euler step.
        y_pred = y[k] + step_size * ode_func(x, y[k])
        # Corrector: average the slopes at both ends of the interval.
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_pred))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod() | 667 | 0 |
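Usage sketch for the integrator, assuming the solver is named `heun` as in the rewrite above. On dy/dx = y with y(0) = 1, whose exact solution is e^x, the method is second order, so the endpoint error shrinks quadratically with the step size:

import numpy as np

y = heun(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)  # y' = y, y(0) = 1 on [0, 1]
print(y[-1])  # ~2.71824, vs np.e = 2.71828...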
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
UpperCamelCase_ = '''CompVis/stable-diffusion-v1-1'''
UpperCamelCase_ = '''CompVis/stable-diffusion-v1-2'''
UpperCamelCase_ = '''CompVis/stable-diffusion-v1-3'''
UpperCamelCase_ = '''CompVis/stable-diffusion-v1-4'''
class snake_case_ ( __UpperCAmelCase ):
'''simple docstring'''
def __init__( self, A_, A_, A_, A_, A_, A_, A_, A_ = True, ) -> Any:
super().__init__()
UpperCAmelCase__ =StableDiffusionPipeline.from_pretrained(A_ )
UpperCAmelCase__ =StableDiffusionPipeline.from_pretrained(A_ )
UpperCAmelCase__ =StableDiffusionPipeline.from_pretrained(A_ )
UpperCAmelCase__ =StableDiffusionPipeline(
vae=A_, text_encoder=A_, tokenizer=A_, unet=A_, scheduler=A_, safety_checker=A_, feature_extractor=A_, requires_safety_checker=A_, )
self.register_modules(pipelinea=self.pipea, pipelinea=self.pipea, pipelinea=self.pipea, pipelinea=self.pipea )
@property
def __UpperCAmelCase ( self ) -> List[str]:
return {k: getattr(self, A_ ) for k in self.config.keys() if not k.startswith("_" )}
def __UpperCAmelCase ( self, A_ = "auto" ) -> List[Any]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase__ =self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(A_ )
def __UpperCAmelCase ( self ) -> Any:
self.enable_attention_slicing(A_ )
@torch.no_grad()
def __UpperCAmelCase ( self, A_, A_ = 512, A_ = 512, A_ = 50, A_ = 7.5, A_ = None, A_ = 1, A_ = 0.0, A_ = None, A_ = None, A_ = "pil", A_ = True, A_ = None, A_ = 1, **A_, ) -> str:
return self.pipea(
prompt=A_, height=A_, width=A_, num_inference_steps=A_, guidance_scale=A_, negative_prompt=A_, num_images_per_prompt=A_, eta=A_, generator=A_, latents=A_, output_type=A_, return_dict=A_, callback=A_, callback_steps=A_, **A_, )
@torch.no_grad()
def __UpperCAmelCase ( self, A_, A_ = 512, A_ = 512, A_ = 50, A_ = 7.5, A_ = None, A_ = 1, A_ = 0.0, A_ = None, A_ = None, A_ = "pil", A_ = True, A_ = None, A_ = 1, **A_, ) -> Optional[Any]:
return self.pipea(
prompt=A_, height=A_, width=A_, num_inference_steps=A_, guidance_scale=A_, negative_prompt=A_, num_images_per_prompt=A_, eta=A_, generator=A_, latents=A_, output_type=A_, return_dict=A_, callback=A_, callback_steps=A_, **A_, )
@torch.no_grad()
def __UpperCAmelCase ( self, A_, A_ = 512, A_ = 512, A_ = 50, A_ = 7.5, A_ = None, A_ = 1, A_ = 0.0, A_ = None, A_ = None, A_ = "pil", A_ = True, A_ = None, A_ = 1, **A_, ) -> Tuple:
return self.pipea(
prompt=A_, height=A_, width=A_, num_inference_steps=A_, guidance_scale=A_, negative_prompt=A_, num_images_per_prompt=A_, eta=A_, generator=A_, latents=A_, output_type=A_, return_dict=A_, callback=A_, callback_steps=A_, **A_, )
@torch.no_grad()
def __UpperCAmelCase ( self, A_, A_ = 512, A_ = 512, A_ = 50, A_ = 7.5, A_ = None, A_ = 1, A_ = 0.0, A_ = None, A_ = None, A_ = "pil", A_ = True, A_ = None, A_ = 1, **A_, ) -> Optional[Any]:
return self.pipea(
prompt=A_, height=A_, width=A_, num_inference_steps=A_, guidance_scale=A_, negative_prompt=A_, num_images_per_prompt=A_, eta=A_, generator=A_, latents=A_, output_type=A_, return_dict=A_, callback=A_, callback_steps=A_, **A_, )
@torch.no_grad()
def __UpperCAmelCase ( self, A_, A_ = 512, A_ = 512, A_ = 50, A_ = 7.5, A_ = None, A_ = 1, A_ = 0.0, A_ = None, A_ = None, A_ = "pil", A_ = True, A_ = None, A_ = 1, **A_, ) -> Union[str, Any]:
UpperCAmelCase__ ="""cuda""" if torch.cuda.is_available() else """cpu"""
self.to(A_ )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
# Get first result from Stable Diffusion Checkpoint v1.1
UpperCAmelCase__ =self.textaimg_sda_a(
prompt=A_, height=A_, width=A_, num_inference_steps=A_, guidance_scale=A_, negative_prompt=A_, num_images_per_prompt=A_, eta=A_, generator=A_, latents=A_, output_type=A_, return_dict=A_, callback=A_, callback_steps=A_, **A_, )
# Get first result from Stable Diffusion Checkpoint v1.2
UpperCAmelCase__ =self.textaimg_sda_a(
prompt=A_, height=A_, width=A_, num_inference_steps=A_, guidance_scale=A_, negative_prompt=A_, num_images_per_prompt=A_, eta=A_, generator=A_, latents=A_, output_type=A_, return_dict=A_, callback=A_, callback_steps=A_, **A_, )
# Get first result from Stable Diffusion Checkpoint v1.3
UpperCAmelCase__ =self.textaimg_sda_a(
prompt=A_, height=A_, width=A_, num_inference_steps=A_, guidance_scale=A_, negative_prompt=A_, num_images_per_prompt=A_, eta=A_, generator=A_, latents=A_, output_type=A_, return_dict=A_, callback=A_, callback_steps=A_, **A_, )
# Get first result from Stable Diffusion Checkpoint v1.4
UpperCAmelCase__ =self.textaimg_sda_a(
prompt=A_, height=A_, width=A_, num_inference_steps=A_, guidance_scale=A_, negative_prompt=A_, num_images_per_prompt=A_, eta=A_, generator=A_, latents=A_, output_type=A_, return_dict=A_, callback=A_, callback_steps=A_, **A_, )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 625 |
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase :Union[str, Any] = logging.get_logger(__name__)
def get_mobilenet_va_config( model_name ):
    '''simple docstring'''
    config = MobileNetVaConfig(layer_norm_eps=0.001 )
    if "_quant" in model_name:
        raise ValueError("""Quantized models are not supported.""" )
    matches = re.match(r"""^mobilenet_v1_([^_]*)_([^_]*)$""" , model_name )
    if matches:
        config.depth_multiplier = float(matches[1] )
        config.image_size = int(matches[2] )
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 10_01
    filename = """imagenet-1k-id2label.json"""
    repo_id = """huggingface/label-files"""
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    idalabel = {int(k ) + 1: v for k, v in idalabel.items()}
    idalabel[0] = """background"""
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def prepare_img( ):
    '''simple docstring'''
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_movilevit_checkpoint( model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub=False ):
    '''simple docstring'''
    config = get_mobilenet_va_config(model_name )
    # Load 🤗 model
    model = MobileNetVaForImageClassification(config ).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model , config , checkpoint_path )
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={"""width""": config.image_size, """height""": config.image_size} , size={"""shortest_edge""": config.image_size + 32} , )
    encoding = image_processor(images=prepare_img() , return_tensors="""pt""" )
    outputs = model(**encoding )
    logits = outputs.logits
    assert logits.shape == (1, 10_01)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1_739, -1.1_233, 3.1_205] )
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9_440, -2.3_141, -0.3_333] )
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3] , expected_logits , atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("""Pushing to the hub...""" )
        repo_id = """google/""" + model_name
        image_processor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''mobilenet_v1_1.0_224''',
type=str,
    help='''Name of the MobileNetV1 model you\'d like to convert. Should be in the form \'mobilenet_v1_<depth>_<size>\'.''',
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
args = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
) | 667 | 0 |
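# Editor's note: a typical invocation of the converter above (assuming the file
# is saved as convert_original_tf_checkpoint_to_pytorch.py; the paths are
# placeholders, not real files):
#
#   python convert_original_tf_checkpoint_to_pytorch.py \
#       --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_pt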
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest ( unittest.TestCase ):
    def setUp(self) -> Optional[Any]:
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["""""", """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab))))
        merges = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file , 'w' , encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
        image_processor_map = {
            """do_resize""": True,
            """size""": 20,
            """do_center_crop""": True,
            """crop_size""": 18,
            """do_normalize""": True,
            """image_mean""": [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
            """image_std""": [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file , 'w' , encoding='utf-8') as fp:
            json.dump(image_processor_map , fp)
    def get_tokenizer(self , **kwargs) -> List[Any]:
        return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='!' , **kwargs)
    def get_rust_tokenizer(self , **kwargs) -> str:
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='!' , **kwargs)
    def get_image_processor(self , **kwargs) -> Union[str, Any]:
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs)
    def tearDown(self) -> Union[str, Any]:
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self) -> Optional[Any]:
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self) -> List[Any]:
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow , image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=False)
        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast , image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer , CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer , CLIPTokenizerFast)
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor , OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor , OwlViTImageProcessor)
    def test_save_load_pretrained_additional_features(self) -> Optional[Any]:
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)
        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=False)
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer , CLIPTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor , OwlViTImageProcessor)
    def test_image_processor(self) -> Optional[int]:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input , return_tensors='np')
        input_processor = processor(images=image_input , return_tensors='np')
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)
    def test_tokenizer(self) -> Tuple:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor)
        input_str = """lower newer"""
        encoded_processor = processor(text=input_str , return_tensors='np')
        encoded_tok = tokenizer(input_str , return_tensors='np')
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist())
    def test_processor(self) -> Optional[Any]:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor)
        input_str = """lower newer"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input)
        self.assertListEqual(list(inputs.keys()) , ['input_ids', 'attention_mask', 'pixel_values'])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_with_text_list(self) -> Dict:
        model_name = """google/owlvit-base-patch32"""
        processor = OwlViTProcessor.from_pretrained(model_name)
        input_text = ["""cat""", """nasa badge"""]
        inputs = processor(text=input_text)
        seq_length = 16
        self.assertListEqual(list(inputs.keys()) , ['input_ids', 'attention_mask'])
        self.assertEqual(inputs['input_ids'].shape , (2, seq_length))
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_with_nested_text_list(self) -> List[str]:
        model_name = """google/owlvit-base-patch32"""
        processor = OwlViTProcessor.from_pretrained(model_name)
        input_texts = [["""cat""", """nasa badge"""], ["""person"""]]
        inputs = processor(text=input_texts)
        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])
        self.assertListEqual(list(inputs.keys()) , ['input_ids', 'attention_mask'])
        self.assertEqual(inputs['input_ids'].shape , (batch_size * num_max_text_queries, seq_length))
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_case(self) -> Optional[Any]:
        model_name = """google/owlvit-base-patch32"""
        processor = OwlViTProcessor.from_pretrained(model_name)
        input_texts = ["""cat""", """nasa badge"""]
        inputs = processor(text=input_texts)
        seq_length = 16
        input_ids = inputs["""input_ids"""]
        predicted_ids = [
            [4_9406, 2368, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [4_9406, 6841, 1_1301, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]
        self.assertListEqual(list(inputs.keys()) , ['input_ids', 'attention_mask'])
        self.assertEqual(inputs['input_ids'].shape , (2, seq_length))
        self.assertListEqual(list(input_ids[0]) , predicted_ids[0])
        self.assertListEqual(list(input_ids[1]) , predicted_ids[1])
    def test_processor_case2(self) -> Any:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()
        inputs = processor(images=image_input , query_images=query_input)
        self.assertListEqual(list(inputs.keys()) , ['query_pixel_values', 'pixel_values'])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self) -> Optional[int]:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok , decoded_processor)
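# Editor's usage sketch: outside the tests, the same processor pairs text
# queries with images for zero-shot detection (network access and the
# published checkpoint are assumed):
#
#   from PIL import Image
#   from transformers import OwlViTProcessor
#
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   image = Image.new("RGB", (640, 480))
#   inputs = processor(text=[["cat", "nasa badge"]], images=image)
#   # -> input_ids, attention_mask and pixel_values, as asserted above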
| 73 |
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class BarkProcessor( ProcessorMixin ):
    tokenizer_class = 'AutoTokenizer'
    attributes = ['tokenizer']
    preset_shape = {
        'semantic_prompt': 1,
        'coarse_prompt': 2,
        'fine_prompt': 2,
    }
    def __init__(self , tokenizer , speaker_embeddings=None ):
        super().__init__(tokenizer )
        self.speaker_embeddings = speaker_embeddings
    @classmethod
    def from_pretrained(cls , pretrained_processor_name_or_path , speaker_embeddings_dict_path="speaker_embeddings_path.json" , **kwargs ):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path , speaker_embeddings_dict_path , subfolder=kwargs.pop("""subfolder""" , None ) , cache_dir=kwargs.pop("""cache_dir""" , None ) , force_download=kwargs.pop("""force_download""" , False ) , proxies=kwargs.pop("""proxies""" , None ) , resume_download=kwargs.pop("""resume_download""" , False ) , local_files_only=kwargs.pop("""local_files_only""" , False ) , use_auth_token=kwargs.pop("""use_auth_token""" , None ) , revision=kwargs.pop("""revision""" , None ) , )
            if speaker_embeddings_path is None:
                logger.warning(
                    F'`{os.path.join(pretrained_processor_name_or_path , speaker_embeddings_dict_path )}` does not exist\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path ) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json )
        else:
            speaker_embeddings = None
        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path , **kwargs )
        return cls(tokenizer=tokenizer , speaker_embeddings=speaker_embeddings )
    def save_pretrained(self , save_directory , speaker_embeddings_dict_path="speaker_embeddings_path.json" , speaker_embeddings_directory="speaker_embeddings" , push_to_hub = False , **kwargs ):
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory , speaker_embeddings_directory , """v2""" ) , exist_ok=True )
            embeddings_dict = {}
            embeddings_dict["""repo_or_path"""] = save_directory
            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key )
                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["""repo_or_path"""] , speaker_embeddings_directory , F'{prompt_key}_{key}' ) , voice_preset[key] , allow_pickle=False , )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory , F'{prompt_key}_{key}.npy' )
                    embeddings_dict[prompt_key] = tmp_dict
            with open(os.path.join(save_directory , speaker_embeddings_dict_path ) , """w""" ) as fp:
                json.dump(embeddings_dict , fp )
        super().save_pretrained(save_directory , push_to_hub , **kwargs )
    def _load_voice_preset(self , voice_preset = None , **kwargs ):
        voice_preset_paths = self.speaker_embeddings[voice_preset]
        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    F'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' )
            path = get_file_from_repo(
                self.speaker_embeddings.get("""repo_or_path""" , """/""" ) , voice_preset_paths[key] , subfolder=kwargs.pop("""subfolder""" , None ) , cache_dir=kwargs.pop("""cache_dir""" , None ) , force_download=kwargs.pop("""force_download""" , False ) , proxies=kwargs.pop("""proxies""" , None ) , resume_download=kwargs.pop("""resume_download""" , False ) , local_files_only=kwargs.pop("""local_files_only""" , False ) , use_auth_token=kwargs.pop("""use_auth_token""" , None ) , revision=kwargs.pop("""revision""" , None ) , )
            if path is None:
                raise ValueError(
                    F'`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exist\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.' )
            voice_preset_dict[key] = np.load(path )
        return voice_preset_dict
    def _validate_voice_preset_dict(self , voice_preset = None , **kwargs ):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(F'Voice preset unrecognized, missing {key} as a key.' )
            if not isinstance(voice_preset[key] , np.ndarray ):
                raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
            if len(voice_preset[key].shape ) != self.preset_shape[key]:
                raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
    def __call__(self , text=None , voice_preset=None , return_tensors="pt" , max_length=256 , add_special_tokens=False , return_attention_mask=True , return_token_type_ids=False , **kwargs ):
        if voice_preset is not None and not isinstance(voice_preset , BatchFeature ):
            if (
                isinstance(voice_preset , str )
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset )
            else:
                if isinstance(voice_preset , str ) and not voice_preset.endswith(""".npz""" ):
                    voice_preset = voice_preset + """.npz"""
                voice_preset = np.load(voice_preset )
        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset , **kwargs )
            voice_preset = BatchFeature(data=voice_preset , tensor_type=return_tensors )
        encoded_text = self.tokenizer(
            text , return_tensors=return_tensors , padding="""max_length""" , max_length=max_length , return_attention_mask=return_attention_mask , return_token_type_ids=return_token_type_ids , add_special_tokens=add_special_tokens , **kwargs , )
        if voice_preset is not None:
            encoded_text["""history_prompt"""] = voice_preset
        return encoded_text | 667 | 0 |
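# Editor's usage sketch: loading the processor with a named voice preset
# (checkpoint and preset ids are assumptions based on the Bark model cards):
#
#   processor = BarkProcessor.from_pretrained("suno/bark-small")
#   encoded = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
#   # `encoded` carries the tokenized text plus a "history_prompt" BatchFeature
#   # holding the semantic_prompt, coarse_prompt and fine_prompt arrays.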
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''],
'''processing_speech_to_text''': ['''Speech2TextProcessor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_speech_to_text"""] = ['''Speech2TextTokenizer''']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""feature_extraction_speech_to_text"""] = ['''Speech2TextFeatureExtractor''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_speech_to_text"""] = [
'''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSpeech2TextForConditionalGeneration''',
'''TFSpeech2TextModel''',
'''TFSpeech2TextPreTrainedModel''',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_speech_to_text"""] = [
'''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Speech2TextForConditionalGeneration''',
'''Speech2TextModel''',
'''Speech2TextPreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
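# Editor's sketch: the `_LazyModule` pattern above defers the heavy framework
# imports until an attribute is first accessed. A stripped-down illustration of
# the same idea (an approximation, not the real implementation):
#
#   import importlib, types
#
#   class LazyModule(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           # map each exported attribute to the submodule that defines it
#           self._attr_to_module = {
#               attr: mod for mod, attrs in import_structure.items() for attr in attrs
#           }
#
#       def __getattr__(self, attr):
#           submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
#           return getattr(submodule, attr)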
| 328 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class InstructBlipProcessorTest ( unittest.TestCase ):
    def setUp(self ):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
        qformer_tokenizer = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
        processor = InstructBlipProcessor(image_processor , tokenizer , qformer_tokenizer )
        processor.save_pretrained(self.tmpdirname )
    def get_tokenizer(self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).tokenizer
    def get_image_processor(self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor
    def get_qformer_tokenizer(self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).qformer_tokenizer
    def tearDown(self ):
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs(self ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self ):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , PreTrainedTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , BlipImageProcessor )
        self.assertIsInstance(processor.qformer_tokenizer , PreTrainedTokenizerFast )
    def test_image_processor(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors="""np""" )
        input_processor = processor(images=image_input , return_tensors="""np""" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def test_tokenizer(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        input_str = """lower newer"""
        encoded_processor = processor(text=input_str )
        encoded_tokens = tokenizer(input_str , return_token_type_ids=False )
        encoded_tokens_qformer = qformer_tokenizer(input_str , return_token_type_ids=False )
        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] )
    def test_processor(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        input_str = """lower newer"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(
            list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_tokenizer_decode(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
    def test_model_input_names(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        input_str = """lower newer"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(
            list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , ) | 667 | 0 |
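# Editor's usage sketch (checkpoint name assumed; requires network access):
#
#   from transformers import InstructBlipProcessor
#
#   processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#   inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")
#   # keys: input_ids, attention_mask, qformer_input_ids, qformer_attention_mask, pixel_values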
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'esm'
    def __init__( self , vocab_size=None , mask_token_id=None , pad_token_id=None , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1_026 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , position_embedding_type="absolute" , use_cache=True , emb_layer_norm_before=None , token_dropout=False , is_folding_model=False , esmfold_config=None , vocab_list=None , **kwargs ):
        super().__init__(pad_token_id=pad_token_id , mask_token_id=mask_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info('No esmfold_config supplied for folding model, using default values.' )
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config , dict ):
                esmfold_config = EsmFoldConfig(**esmfold_config )
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!' )
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config , 'use_esm_attn_map' , False ):
            raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!' )
    def to_dict( self ):
        output = super().to_dict()
        if isinstance(self.esmfold_config , EsmFoldConfig ):
            output['esmfold_config'] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    '''simple docstring'''
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None
    def __post_init__( self ):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk , dict ):
            self.trunk = TrunkConfig(**self.trunk )
    def to_dict( self ):
        output = asdict(self )
        output['trunk'] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    '''simple docstring'''
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None
    def __post_init__( self ):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module , dict ):
            self.structure_module = StructureModuleConfig(**self.structure_module )
        if self.max_recycles <= 0:
            raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'
                F''' {self.sequence_state_dim} and {self.sequence_head_width}.''' )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'
                F''' {self.pairwise_state_dim} and {self.pairwise_head_width}.''' )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                '`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got'
                F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                '`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got'
                F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
        if self.dropout >= 0.4:
            raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
    def to_dict( self ):
        output = asdict(self )
        output['structure_module'] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    '''simple docstring'''
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1E-8
    inf: float = 1E5
    def to_dict( self ):
        return asdict(self )
def get_default_vocab_list() -> tuple:
'''simple docstring'''
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
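# Editor's example: a folding-model config can be built and round-tripped
# offline; the nested dataclasses serialize recursively through to_dict().
def _demo_esm_config() -> dict:
    config = EsmConfig(vocab_size=33, is_folding_model=True, esmfold_config={"trunk": {"num_blocks": 2}})
    output = config.to_dict()
    assert output["esmfold_config"]["trunk"]["num_blocks"] == 2
    return output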
| 7 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class MgpstrConfig( PretrainedConfig ):
    model_type = 'mgp-str'
    def __init__(self , image_size=[32, 128] , patch_size=4 , num_channels=3 , max_token_length=27 , num_character_labels=38 , num_bpe_labels=50257 , num_wordpiece_labels=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , mlp_ratio=4.0 , qkv_bias=True , distilled=False , layer_norm_eps=1E-5 , drop_rate=0.0 , attn_drop_rate=0.0 , drop_path_rate=0.0 , output_aa_attentions=False , initializer_range=0.02 , **kwargs ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_aa_attentions = output_aa_attentions
        self.initializer_range = initializer_range | 667 | 0 |
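# Editor's example: the config is usable offline; values fall back to the
# defaults shown above.
def _demo_mgpstr_config() -> None:
    config = MgpstrConfig(max_token_length=30)
    assert config.model_type == "mgp-str"
    assert (config.max_token_length, config.hidden_size) == (30, 768)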
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def euler_modified( ode_func : Callable , ya : float , xa : float , step_size : float , x_end : float ) -> np.ndarray:
    n = int(np.ceil((x_end - xa) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        # predictor: one explicit Euler step
        y_predict = y[k] + step_size * ode_func(x , y[k] )
        # corrector: trapezoidal average of the slopes at both ends
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y_predict ))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
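# Editor's example: integrate y' = y on [0, 1] with y(0) = 1; with step 1e-3
# the second-order method lands within ~1e-6 of e at the right endpoint.
def _demo_euler_modified() -> None:
    y = euler_modified(lambda x, y: y, 1.0, 0.0, 0.001, 1.0)
    assert abs(y[-1] - np.e) < 1e-4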
| 227 |
'''simple docstring'''
import math
from collections.abc import Callable
def intersection( function : Callable[[float], float] , xa : float , xb : float ) -> float:
    '''simple docstring'''
    x_n : float = xa
    x_na : float = xb
    while True:
        if x_n == x_na or function(x_na ) == function(x_n ):
            raise ZeroDivisionError("""float division by zero, could not find root""" )
        x_nb : float = x_na - (
            function(x_na ) / ((function(x_na ) - function(x_n )) / (x_na - x_n))
        )
        if abs(x_nb - x_na ) < 10**-5:
            return x_nb
        x_n = x_na
        x_na = x_nb


def f( x : float ) -> float:
    '''simple docstring'''
    return math.pow(x , 3 ) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5)) | 667 | 0 |
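# Editor's example: the same secant iteration finds the root of cos(x) - x
# (~0.739085) starting from 0 and 1.
def _demo_intersection() -> None:
    root = intersection(lambda x: math.cos(x) - x, 0.0, 1.0)
    assert abs(math.cos(root) - root) < 1e-4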
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class TvltProcessor( ProcessorMixin ):
    attributes = ['image_processor', 'feature_extractor']
    image_processor_class = 'TvltImageProcessor'
    feature_extractor_class = 'TvltFeatureExtractor'
    def __init__( self , image_processor , feature_extractor ):
        super().__init__(image_processor=image_processor , feature_extractor=feature_extractor )
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor
    def __call__( self , images=None , audio=None , images_mixed=None , sampling_rate=None , mask_audio=False , mask_pixel=False , *args , **kwargs ):
        if images is None and audio is None:
            raise ValueError('''You need to specify either an `images` or `audio` input to process.''' )
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images , mask_pixel=mask_pixel , *args , **kwargs )
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed , is_mixed=True , *args , **kwargs )
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio , *args , sampling_rate=sampling_rate , mask_audio=mask_audio , **kwargs )
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict )
        if images is not None:
            output_dict.update(images_dict )
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict )
        return output_dict
    @property
    def model_input_names( self ):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
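# Editor's usage sketch: the processor simply merges the outputs of its two
# sub-processors, so audio-only and image-only calls are both valid. Building
# the sub-processors needs their (heavier) transformers classes, so this stays
# a commented illustration with placeholder inputs:
#
#   processor = TvltProcessor(TvltImageProcessor(), TvltFeatureExtractor())
#   audio_inputs = processor(audio=waveform, sampling_rate=44_100)
#   image_inputs = processor(images=video_frames)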
| 640 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor( BaseImageProcessor ):
    model_input_names = ['pixel_values']
    def __init__(self , do_resize = True , size = None , resample = PILImageResampling.BICUBIC , do_center_crop = True , crop_size = None , do_rescale = True , rescale_factor = 1 / 255 , do_normalize = True , image_mean = None , image_std = None , do_convert_rgb = True , **kwargs ):
        super().__init__(**kwargs )
        size = size if size is not None else {"""shortest_edge""": 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
        crop_size = get_size_dict(crop_size , default_to_square=True , param_name="""crop_size""" )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self , image , size , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs ):
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
        output_size = get_resize_output_image_size(image , size=size["""shortest_edge"""] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop(self , image , size , data_format = None , **kwargs ):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
        return center_crop(image , size=(size["""height"""], size["""width"""]) , data_format=data_format , **kwargs )
    def rescale(self , image , scale , data_format = None , **kwargs ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize(self , image , mean , std , data_format = None , **kwargs ):
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess(self , images , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , do_convert_rgb = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , param_name="""size""" , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="""crop_size""" , default_to_square=True )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""" )
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data , tensor_type=return_tensors ) | 667 | 0 |
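# Editor's example: the preprocessing pipeline runs offline (assuming PIL is
# available); with the defaults above, a PIL image becomes a (1, 3, 224, 224)
# float array.
def _demo_clip_image_processor() -> None:
    image = PIL.Image.fromarray(np.zeros((256, 320, 3), dtype=np.uint8))
    processor = CLIPImageProcessor()
    batch = processor(images=image, return_tensors="np")
    assert batch["pixel_values"].shape == (1, 3, 224, 224)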